This article collects typical usage examples of the OBJ_CONSTRUCT function in C++. If you have been wondering what exactly OBJ_CONSTRUCT does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Fifteen code examples of OBJ_CONSTRUCT are shown below, sorted by popularity by default.
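A quick orientation before the examples: OBJ_CONSTRUCT is OPAL's macro for initializing an object that lives in stack or static storage (OBJ_NEW is its heap-allocating counterpart), and every OBJ_CONSTRUCT must eventually be paired with an OBJ_DESTRUCT. A minimal sketch of that life cycle, assuming only the standard OPAL list header:

#include "opal/class/opal_list.h"

static void list_lifecycle_sketch(void)
{
    opal_list_t mylist;
    /* run the opal_list_t constructor on in-place storage */
    OBJ_CONSTRUCT(&mylist, opal_list_t);
    /* ... use the list, e.g. opal_list_append()/opal_list_remove_first() ... */
    /* run the destructor before the storage goes out of scope */
    OBJ_DESTRUCT(&mylist);
}

Most of the examples below follow exactly this pattern with different OPAL/ORTE/OMPI classes.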
Example 1: ompi_osc_pt2pt_replyreq_construct
static void ompi_osc_pt2pt_replyreq_construct(ompi_osc_pt2pt_replyreq_t *replyreq)
{
OBJ_CONSTRUCT(&(replyreq->rep_target_convertor), ompi_convertor_t);
}
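Constructors like this one are never called directly; OPAL's class system registers them via OBJ_CLASS_INSTANCE, and OBJ_CONSTRUCT/OBJ_NEW then invoke them (after the parent class's constructor) automatically. A hedged sketch of such a registration - the parent class and the destructor shown here are illustrative assumptions, not the actual pt2pt declarations:

static void ompi_osc_pt2pt_replyreq_destruct(ompi_osc_pt2pt_replyreq_t *replyreq)
{
    /* hypothetical pairing: tear down what the constructor built */
    OBJ_DESTRUCT(&(replyreq->rep_target_convertor));
}

OBJ_CLASS_INSTANCE(ompi_osc_pt2pt_replyreq_t,        /* class being defined */
                   opal_list_item_t,                 /* assumed parent class */
                   ompi_osc_pt2pt_replyreq_construct,
                   ompi_osc_pt2pt_replyreq_destruct);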
Example 2: mca_btl_ud_component_open
int mca_btl_ud_component_open(void)
{
int val;
/* initialize state */
mca_btl_ofud_component.num_btls = 0;
mca_btl_ofud_component.ud_btls = NULL;
/* initialize objects */
OBJ_CONSTRUCT(&mca_btl_ofud_component.ud_lock, opal_mutex_t);
OBJ_CONSTRUCT(&mca_btl_ofud_component.ud_procs, opal_list_t);
/* register IB component parameters */
mca_btl_ud_param_reg_int("max_btls",
"Maximum number of HCAs/ports to use",
4, (int*)&mca_btl_ofud_component.max_btls);
mca_btl_ud_param_reg_string("mpool", "Name of the memory pool to be used",
"rdma", &mca_btl_ofud_component.ud_mpool_name);
mca_btl_ud_param_reg_int("ib_pkey_index", "IB pkey index",
0, (int*)&mca_btl_ofud_component.ib_pkey_ix);
mca_btl_ud_param_reg_int("ib_qkey", "IB qkey",
0x01330133, (int*)&mca_btl_ofud_component.ib_qkey);
mca_btl_ud_param_reg_int("ib_service_level", "IB service level",
0, (int*)&mca_btl_ofud_component.ib_service_level);
mca_btl_ud_param_reg_int("ib_src_path_bits", "IB source path bits",
0, (int*)&mca_btl_ofud_component.ib_src_path_bits);
mca_btl_ud_param_reg_int("sd_num", "maximum send descriptors to post",
128, (int*)&mca_btl_ofud_component.sd_num);
mca_btl_ud_param_reg_int("rd_num", "number of receive buffers",
6000, (int*)&mca_btl_ofud_component.rd_num);
#if 0
mca_btl_ud_param_reg_int("rd_num_init", "initial receive buffers",
3000, (int*)&mca_btl_ofud_component.rd_num_init);
mca_btl_ud_param_reg_int("rd_num_max", "maximum receive buffers",
4500, (int*)&mca_btl_ofud_component.rd_num_max);
mca_btl_ud_param_reg_int("rd_num_inc",
"number of buffers to post when rate is high",
25, (int*)&mca_btl_ofud_component.rd_num_inc);
#endif
/* TODO - this assumes a 2k UD MTU - query/do something more intelligent */
/*mca_btl_ud_param_reg_int("eager_limit", "eager send limit",
2048, &val); */
mca_btl_ud_param_reg_int("min_send_size", "minimum send size",
2048, &val);
mca_btl_ofud_module.super.btl_rndv_eager_limit = val;
mca_btl_ud_param_reg_int("max_send_size", "maximum send size",
2048, &val);
mca_btl_ofud_module.super.btl_eager_limit = val;
mca_btl_ofud_module.super.btl_max_send_size = val;
mca_btl_ud_param_reg_int("exclusivity", "BTL exclusivity",
MCA_BTL_EXCLUSIVITY_DEFAULT,
(int*)&mca_btl_ofud_module.super.btl_exclusivity);
mca_btl_ud_param_reg_int("bandwidth",
"Approximate maximum bandwidth of interconnect",
800, (int*)&mca_btl_ofud_module.super.btl_bandwidth);
mca_btl_ofud_module.super.btl_eager_limit -= sizeof(mca_btl_ud_header_t);
mca_btl_ofud_module.super.btl_max_send_size -= sizeof(mca_btl_ud_header_t);
return OMPI_SUCCESS;
}
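The mca_btl_ud_param_reg_int()/mca_btl_ud_param_reg_string() calls above are the thin registration wrappers that BTL components of this era typically define for themselves; their bodies are not shown on this page. A plausible sketch of the integer variant, inferred from the call sites and the historical mca_base_param_reg_int() API (treat the wrapper body and the component member names as assumptions):

static inline int mca_btl_ud_param_reg_int(const char *param_name,
                                           const char *help,
                                           int default_value, int *storage)
{
    /* register a component-scoped integer MCA parameter and deposit
     * its current value into *storage */
    return mca_base_param_reg_int(&mca_btl_ofud_component.super.btl_version,
                                  param_name, help, false, false,
                                  default_value, storage);
}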
Example 3: lookup
//......... some code omitted here .........
}
/* lookup the value on the global ompi_server, but error
* if that server wasn't contacted
*/
if (!mca_pubsub_orte_component.server_found) {
opal_show_help("help-ompi-pubsub-orte.txt",
"pubsub-orte:no-server",
true, (long)ORTE_PROC_MY_NAME->vpid,
"lookup from");
return NULL;
}
info_host = &mca_pubsub_orte_component.server;
} else if (NONE == lookup[i]) {
continue;
} else {
/* unknown host! */
opal_show_help("help-ompi-pubsub-orte.txt",
"pubsub-orte:unknown-order",
true, (long)ORTE_PROC_MY_NAME->vpid);
return NULL;
}
/* go look it up */
/* construct the buffer */
buf = OBJ_NEW(opal_buffer_t);
/* pack the lookup command */
if (OPAL_SUCCESS != (ret = opal_dss.pack(buf, &cmd, 1, ORTE_DATA_SERVER_CMD))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* pack the service name */
if (OPAL_SUCCESS != (ret = opal_dss.pack(buf, &service_name, 1, OPAL_STRING))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* send the cmd */
if (0 > (ret = orte_rml.send_buffer_nb(info_host, buf,
ORTE_RML_TAG_DATA_SERVER,
orte_rml_send_callback, NULL))) {
ORTE_ERROR_LOG(ret);
OBJ_RELEASE(buf);
goto CLEANUP;
}
/* get the answer */
OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t);
xfer.active = true;
orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
ORTE_RML_TAG_DATA_CLIENT,
ORTE_RML_NON_PERSISTENT,
orte_rml_recv_callback, &xfer);
OMPI_WAIT_FOR_COMPLETION(xfer.active);
/* unpack the return code */
cnt = 1;
if (OPAL_SUCCESS != (ret = opal_dss.unpack(&xfer.data, &rc, &cnt, OPAL_INT))) {
ORTE_ERROR_LOG(ret);
goto CLEANUP;
}
OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_framework.framework_output,
"%s pubsub:orte: lookup returned status %d",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), rc));
if (ORTE_SUCCESS == rc) {
/* the server was able to lookup the port - unpack the port name */
cnt=1;
if (OPAL_SUCCESS != (ret = opal_dss.unpack(&xfer.data, &port_name, &cnt, OPAL_STRING))) {
ORTE_ERROR_LOG(ret);
OBJ_DESTRUCT(&xfer);
goto CLEANUP;
}
OPAL_OUTPUT_VERBOSE((1, ompi_pubsub_base_framework.framework_output,
"%s pubsub:orte: lookup returned port %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(NULL == port_name) ? "NULL" : port_name));
if (NULL != port_name) {
/* got an answer - return it */
OBJ_DESTRUCT(&xfer);
return port_name;
}
}
/* if we didn't get a port_name, then continue */
OBJ_DESTRUCT(&xfer);
}
/* only get here if we tried both options and failed - since the
* buffer will already have been cleaned up, just return
*/
CLEANUP:
return NULL;
}
Example 4: orte_regex_create
int orte_regex_create(char *nodelist, char **regexp)
{
char *node;
char prefix[ORTE_MAX_NODE_PREFIX];
int i, j, len, startnum, nodenum, numdigits;
bool found, fullname;
char *suffix, *sfx;
orte_regex_node_t *ndreg;
orte_regex_range_t *range;
opal_list_t nodeids;
opal_list_item_t *item, *itm2;
char **regexargs = NULL, *tmp, *tmp2;
char *cptr;
/* define the default */
*regexp = NULL;
cptr = strchr(nodelist, ',');
if (NULL == cptr) {
/* if there is only one node, don't bother */
*regexp = strdup(nodelist);
return ORTE_SUCCESS;
}
/* setup the list of results */
OBJ_CONSTRUCT(&nodeids, opal_list_t);
/* cycle through the comma-separated list of node names */
node = nodelist;
while (NULL != (cptr = strchr(node, ',')) || 0 < strlen(node)) {
if (NULL != cptr) {
*cptr = '\0';
}
/* determine this node's prefix by looking for first non-alpha char */
fullname = false;
len = strlen(node);
startnum = -1;
memset(prefix, 0, ORTE_MAX_NODE_PREFIX);
numdigits = 0;
for (i=0, j=0; i < len; i++) {
if (!isalpha(node[i])) {
/* found a non-alpha char */
if (!isdigit(node[i])) {
/* if it is anything but a digit, we just use
* the entire name
*/
fullname = true;
break;
}
/* count the size of the numeric field - but don't
* add the digits to the prefix
*/
numdigits++;
if (startnum < 0) {
/* okay, this defines end of the prefix */
startnum = i;
}
continue;
}
if (startnum < 0) {
prefix[j++] = node[i];
}
}
if (fullname || startnum < 0) {
/* can't compress this name - just add it to the list */
ndreg = OBJ_NEW(orte_regex_node_t);
ndreg->prefix = strdup(node);
opal_list_append(&nodeids, &ndreg->super);
/* move to the next posn */
if (NULL == cptr) {
break;
}
node = cptr + 1;
continue;
}
/* convert the digits and get any suffix */
nodenum = strtol(&node[startnum], &sfx, 10);
if (NULL != sfx) {
suffix = strdup(sfx);
} else {
suffix = NULL;
}
/* is this nodeid already on our list? */
found = false;
for (item = opal_list_get_first(&nodeids);
!found && item != opal_list_get_end(&nodeids);
item = opal_list_get_next(item)) {
ndreg = (orte_regex_node_t*)item;
if (0 < strlen(prefix) && NULL == ndreg->prefix) {
continue;
}
if (0 == strlen(prefix) && NULL != ndreg->prefix) {
continue;
}
if (0 < strlen(prefix) && NULL != ndreg->prefix
&& 0 != strcmp(prefix, ndreg->prefix)) {
continue;
}
if (NULL == suffix && NULL != ndreg->suffix) {
continue;
//......... some code omitted here .........
Example 5: btl_openib_async_thread
/* This async event thread handles all async events of
* all BTLs/devices in the openib component
*/
void* btl_openib_async_thread(void * async)
{
int rc;
int i;
struct mca_btl_openib_async_poll devices_poll;
opal_list_t ignore_qp_err_list;
OBJ_CONSTRUCT(&ignore_qp_err_list, opal_list_t);
if (OMPI_SUCCESS != btl_openib_async_poll_init(&devices_poll)) {
BTL_ERROR(("Fatal error, stoping asynch event thread"));
pthread_exit(&return_status);
}
while(1) {
rc = poll(devices_poll.async_pollfd, devices_poll.active_poll_size, -1);
if (rc < 0) {
if (errno != EINTR) {
BTL_ERROR(("Poll failed. Fatal error, stoping asynch event thread"));
pthread_exit(&return_status);
} else {
/* EINTR - we were interrupted */
continue;
}
}
for(i = 0; i < devices_poll.active_poll_size; i++) {
switch (devices_poll.async_pollfd[i].revents) {
case 0:
/* no events */
break;
case POLLIN:
#if defined(__SVR4) && defined(__sun)
/*
* Need workaround for Solaris IB user verbs since
* "Poll on IB async fd returns POLLRDNORM revent even though it is masked out"
*/
case POLLIN | POLLRDNORM:
#endif
/* Processing our event */
if (0 == i) {
/* pollfd entry 0 is used for communication with the main thread */
if (OMPI_SUCCESS != btl_openib_async_commandh(&devices_poll,
&ignore_qp_err_list)) {
free(devices_poll.async_pollfd);
BTL_ERROR(("Failed to process async thread process. "
"Fatal error, stoping asynch event thread"));
pthread_exit(&return_status);
}
} else {
/* We got a device event */
if (btl_openib_async_deviceh(&devices_poll, i,
&ignore_qp_err_list)) {
free(devices_poll.async_pollfd);
BTL_ERROR(("Failed to process async thread process. "
"Fatal error, stoping asynch event thread"));
pthread_exit(&return_status);
}
}
break;
default:
/* Got an event other than POLLIN -
* this case should never happen */
BTL_ERROR(("Got unexpected event %d. "
"Fatal error, stopping async event thread",
devices_poll.async_pollfd[i].revents));
free(devices_poll.async_pollfd);
pthread_exit(&return_status);
}
}
}
return PTHREAD_CANCELED;
}
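The special-cased pollfd 0 in the loop above is, in this design, the read end of a pipe the main thread uses to send small command tokens to the async thread. A hedged sketch of the sending side of that idiom (the helper name and fd parameter are illustrative):

#include <unistd.h>

/* hypothetical helper: wake the async thread with a command token */
static int btl_openib_async_send_command(int pipe_write_fd, int command)
{
    if (sizeof(command) != write(pipe_write_fd, &command, sizeof(command))) {
        return OMPI_ERROR;   /* short write or error; caller must handle it */
    }
    return OMPI_SUCCESS;
}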
Example 6: rank_by
static int rank_by(orte_job_t *jdata,
orte_app_context_t *app,
opal_list_t *nodes,
hwloc_obj_type_t target,
unsigned cache_level)
{
hwloc_obj_t obj;
int num_objs, i, j, rc;
orte_vpid_t num_ranked=0;
orte_node_t *node;
orte_proc_t *proc;
orte_vpid_t vpid;
int cnt;
opal_pointer_array_t objs;
bool all_done;
opal_list_item_t *item;
hwloc_obj_t locale;
if (ORTE_RANKING_SPAN & ORTE_GET_RANKING_DIRECTIVE(jdata->map->ranking)) {
return rank_span(jdata, app, nodes, target, cache_level);
} else if (ORTE_RANKING_FILL & ORTE_GET_RANKING_DIRECTIVE(jdata->map->ranking)) {
return rank_fill(jdata, app, nodes, target, cache_level);
}
/* if ranking is not spanned or filled, then we
* default to assign ranks sequentially across
* target objects within a node until that node
* is fully ranked, and then move on to the next
* node
*
* Node 0 Node 1
* Obj 0 Obj 1 Obj 0 Obj 1
* 0 2 1 3 8 10 9 11
* 4 6 5 7 12 14 13 15
*/
/* setup the pointer array */
OBJ_CONSTRUCT(&objs, opal_pointer_array_t);
opal_pointer_array_init(&objs, 2, INT_MAX, 2);
vpid = jdata->num_procs;
cnt = 0;
for (item = opal_list_get_first(nodes);
item != opal_list_get_end(nodes);
item = opal_list_get_next(item)) {
node = (orte_node_t*)item;
/* get the number of objects - only consider those we can actually use */
num_objs = opal_hwloc_base_get_nbobjs_by_type(node->topology, target,
cache_level, OPAL_HWLOC_AVAILABLE);
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
"mca:rmaps:rank_by: found %d objects on node %s with %d procs",
num_objs, node->name, (int)node->num_procs);
if (0 == num_objs) {
return ORTE_ERR_NOT_SUPPORTED;
}
/* collect all the objects */
for (i=0; i < num_objs; i++) {
obj = opal_hwloc_base_get_obj_by_type(node->topology, target,
cache_level, i, OPAL_HWLOC_AVAILABLE);
opal_pointer_array_set_item(&objs, i, obj);
}
/* cycle across the objects, assigning a proc to each one,
* until all procs have been assigned - unfortunately, since
* more than this job may be mapped onto a node, the number
* of procs on the node can't be used to tell us when we
* are done. Instead, we have to just keep going until all
* procs are ranked - which means we have to make one extra
* pass through the loop
*
* Perhaps someday someone will come up with a more efficient
* algorithm, but this works for now.
*/
all_done = false;
while (!all_done && cnt < app->num_procs) {
all_done = true;
/* cycle across the objects */
for (i=0; i < num_objs && cnt < app->num_procs; i++) {
obj = (hwloc_obj_t)opal_pointer_array_get_item(&objs, i);
/* find the next proc on this object */
for (j=0; j < node->procs->size && cnt < app->num_procs; j++) {
if (NULL == (proc = (orte_proc_t*)opal_pointer_array_get_item(node->procs, j))) {
continue;
}
/* ignore procs from other jobs */
if (proc->name.jobid != jdata->jobid) {
opal_output_verbose(5, orte_rmaps_base_framework.framework_output,
"mca:rmaps:rank_by skipping proc %s - from another job, num_ranked %d",
ORTE_NAME_PRINT(&proc->name), num_ranked);
continue;
}
/* ignore procs that are already ranked */
if (ORTE_VPID_INVALID != proc->name.vpid) {
continue;
}
/* ignore procs from other apps */
if (proc->app_idx != app->idx) {
continue;
}
//......... some code omitted here .........
Example 7: ompi_proc_pack
int
ompi_proc_pack(ompi_proc_t **proclist, int proclistsize,
bool full_info,
opal_buffer_t* buf)
{
int i, rc;
OPAL_THREAD_LOCK(&ompi_proc_lock);
/* cycle through the provided array, packing the OMPI level
* data for each proc. This data may or may not be included
* in any subsequent modex operation, so we include it here
* to ensure completion of a connect/accept handshake. See
* the ompi/mca/dpm framework for an example of where and how
* this info is used.
*
* Eventually, we will review the procedures that call this
* function to see if duplication of communication can be
* reduced. For now, just go ahead and pack the info so it
* can be sent.
*/
for (i=0; i<proclistsize; i++) {
rc = opal_dss.pack(buf, &(proclist[i]->proc_name), 1, OMPI_NAME);
if(rc != OPAL_SUCCESS) {
OMPI_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
}
if (full_info) {
int32_t num_entries;
opal_value_t *kv;
opal_list_t data;
/* fetch all global info we know about the peer - while
* the remote procs may already know some of it, we cannot
* be certain they do. So we must include a full dump of
* everything we know about this proc, excluding INTERNAL
* data that each process computes about its peers
*/
OBJ_CONSTRUCT(&data, opal_list_t);
rc = opal_db.fetch_multiple((opal_identifier_t*)&proclist[i]->proc_name,
OPAL_SCOPE_GLOBAL, NULL, &data);
if (OPAL_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
num_entries = 0;
} else {
/* count the number of entries we will send */
num_entries = opal_list_get_size(&data);
}
/* put the number of entries into the buffer */
rc = opal_dss.pack(buf, &num_entries, 1, OPAL_INT32);
if (OPAL_SUCCESS != rc) {
OMPI_ERROR_LOG(rc);
break;
}
/* if there are entries, store them */
while (NULL != (kv = (opal_value_t*)opal_list_remove_first(&data))) {
if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &kv, 1, OPAL_VALUE))) {
OMPI_ERROR_LOG(rc);
break;
}
OBJ_RELEASE(kv);
}
OBJ_DESTRUCT(&data);
} else {
rc = opal_dss.pack(buf, &(proclist[i]->proc_arch), 1, OPAL_UINT32);
if(rc != OPAL_SUCCESS) {
OMPI_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
}
rc = opal_dss.pack(buf, &(proclist[i]->proc_hostname), 1, OPAL_STRING);
if(rc != OPAL_SUCCESS) {
OMPI_ERROR_LOG(rc);
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return rc;
}
}
}
OPAL_THREAD_UNLOCK(&ompi_proc_lock);
return OMPI_SUCCESS;
}
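Every pack in this routine implies a mirror-image unpack on the receiving side. A hedged sketch of how a receiver could walk the same wire format for a single proc (the function name is invented for illustration, and the non-full_info branch is elided):

static int unpack_one_proc_sketch(opal_buffer_t *buf, bool full_info)
{
    int rc;
    int32_t i, cnt = 1, num_entries;
    ompi_process_name_t name;
    opal_value_t *kv;

    /* the name comes first, matching the pack order above */
    if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &name, &cnt, OMPI_NAME))) {
        return rc;
    }
    if (!full_info) {
        return OPAL_SUCCESS;   /* arch + hostname unpacking elided */
    }
    /* then the entry count, then that many opal_value_t entries */
    cnt = 1;
    if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &num_entries, &cnt, OPAL_INT32))) {
        return rc;
    }
    for (i = 0; i < num_entries; i++) {
        cnt = 1;
        if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &kv, &cnt, OPAL_VALUE))) {
            return rc;
        }
        /* ... store kv against this peer, then drop our reference ... */
        OBJ_RELEASE(kv);
    }
    return OPAL_SUCCESS;
}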
Example 8: mca_coll_base_find_available
/*
* Scan down the list of successfully opened components and query each of
* them (the opened list will be one or more components. If the user
* requested a specific component, it will be the only component in the
* opened list). Create and populate the available list of all
* components who indicate that they want to be considered for selection.
* Close all components who do not want to be considered for selection,
* and destroy the opened list.
*
* Also find the basic component while we're doing all of this, and save
* it in a global variable so that we can find it easily later (e.g.,
* during scope selection).
*/
int mca_coll_base_find_available(bool enable_progress_threads,
bool enable_mpi_threads)
{
bool found = false;
mca_base_component_priority_list_item_t *entry;
opal_list_item_t *p;
const mca_base_component_t *component;
/* Initialize the list */
OBJ_CONSTRUCT(&mca_coll_base_components_available, opal_list_t);
mca_coll_base_components_available_valid = true;
/* The list of components that we should check has already been
established in mca_coll_base_open. */
for (found = false,
p = opal_list_remove_first(&mca_coll_base_components_opened);
p != NULL;
p = opal_list_remove_first(&mca_coll_base_components_opened)) {
component = ((mca_base_component_list_item_t *) p)->cli_component;
/* Call a subroutine to do the work, because the component may
represent different versions of the coll MCA. */
entry = OBJ_NEW(mca_base_component_priority_list_item_t);
entry->super.cli_component = component;
entry->cpli_priority = 0;
if (OMPI_SUCCESS == init_query(component, entry,
enable_progress_threads,
enable_mpi_threads)) {
opal_list_append(&mca_coll_base_components_available,
(opal_list_item_t *) entry);
found = true;
} else {
/* If the component doesn't want to run, then close it. It's
already had its close() method invoked; now close it out of
the DSO repository (if it's there). */
mca_base_component_repository_release(component);
OBJ_RELEASE(entry);
}
/* Free the entry from the "opened" list */
OBJ_RELEASE(p);
}
/* The opened list is now no longer useful and we can free it */
OBJ_DESTRUCT(&mca_coll_base_components_opened);
mca_coll_base_components_opened_valid = false;
/* If we have no collective components available, it's an error.
Thanks for playing! */
if (!found) {
/* Need to free all items in the list */
OBJ_DESTRUCT(&mca_coll_base_components_available);
mca_coll_base_components_available_valid = false;
opal_output_verbose(10, mca_coll_base_output,
"coll:find_available: no coll components available!");
orte_show_help("help-mca-base", "find-available:none-found", true,
"coll");
return OMPI_ERROR;
}
/* All done */
return OMPI_SUCCESS;
}
Example 9: plm_slurm_launch_job
//......... some code omitted here .........
OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
"%s plm:slurm: final top-level argv:\n\t%s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(NULL == param) ? "NULL" : param));
if (NULL != param) free(param);
}
/* exec the daemon(s) */
if (ORTE_SUCCESS != (rc = plm_slurm_start_proc(argc, argv, env, cur_prefix))) {
ORTE_ERROR_LOG(rc);
goto cleanup;
}
/* do NOT wait for srun to complete. Srun only completes when the processes
* it starts - in this case, the orteds - complete. Instead, we'll catch
* any srun failures and deal with them elsewhere
*/
/* wait for daemons to callback */
if (ORTE_SUCCESS != (rc = orte_plm_base_daemon_callback(map->num_new_daemons))) {
OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
"%s plm:slurm: daemon launch failed for job %s on error %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_JOBID_PRINT(active_job), ORTE_ERROR_NAME(rc)));
goto cleanup;
}
launch_apps:
/* get here if daemons launch okay - any failures now by apps */
launching_daemons = false;
failed_job = active_job;
if (using_regexp) {
/* daemons already have launch cmd - just wait for them to
* report back
*/
opal_buffer_t launch;
int8_t flag;
orte_daemon_cmd_flag_t command = ORTE_DAEMON_ADD_LOCAL_PROCS;
OBJ_CONSTRUCT(&launch, opal_buffer_t);
opal_dss.pack(&launch, &command, 1, ORTE_DAEMON_CMD);
flag = 1;
opal_dss.pack(&launch, &flag, 1, OPAL_INT8);
opal_dss.pack(&launch, &orted_launch_cmd, 1, OPAL_STRING);
ORTE_MESSAGE_EVENT(ORTE_PROC_MY_NAME, &launch, ORTE_RML_TAG_DAEMON, orte_daemon_cmd_processor);
OBJ_DESTRUCT(&launch);
if (ORTE_SUCCESS != (rc = orte_plm_base_report_launched(jdata->jobid))) {
OPAL_OUTPUT_VERBOSE((5, orte_plm_globals.output,
"%s plm:slurm:launch failed for job %s on error %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_JOBID_PRINT(jdata->jobid), ORTE_ERROR_NAME(rc)));
goto cleanup;
}
} else {
if (ORTE_SUCCESS != (rc = orte_plm_base_launch_apps(active_job))) {
OPAL_OUTPUT_VERBOSE((1, orte_plm_globals.output,
"%s plm:slurm: launch of apps failed for job %s on error %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_JOBID_PRINT(active_job), ORTE_ERROR_NAME(rc)));
goto cleanup;
}
}
/* declare the launch a success */
failed_launch = false;
if (orte_timing) {
if (0 != gettimeofday(&launchstop, NULL)) {
opal_output(0, "plm_slurm: could not obtain stop time");
} else {
opal_output(0, "plm_slurm: total job launch time is %ld usec",
(launchstop.tv_sec - launchstart.tv_sec)*1000000 +
(launchstop.tv_usec - launchstart.tv_usec));
}
}
if (ORTE_SUCCESS != rc) {
opal_output(0, "plm:slurm: start_procs returned error %d", rc);
goto cleanup;
}
cleanup:
if (NULL != argv) {
opal_argv_free(argv);
}
if (NULL != env) {
opal_argv_free(env);
}
if(NULL != jobid_string) {
free(jobid_string);
}
/* check for failed launch - if so, force terminate */
if (failed_launch) {
orte_plm_base_launch_failed(failed_job, -1, ORTE_ERROR_DEFAULT_EXIT_CODE, ORTE_JOB_STATE_FAILED_TO_START);
}
return rc;
}
Example 10: mca_pml_yalla_convertor_construct
static void mca_pml_yalla_convertor_construct(mca_pml_yalla_convertor_t *convertor)
{
OBJ_CONSTRUCT(&convertor->convertor, opal_convertor_t);
}
Example 11: native_get_attr
static bool native_get_attr(const char *attr, opal_value_t **kv)
{
opal_buffer_t *msg, *bptr;
opal_list_t vals;
opal_value_t *kp, *lclpeers=NULL, kvn;
pmix_cmd_t cmd = PMIX_GETATTR_CMD;
char **ranks;
int rc, ret;
int32_t cnt;
bool found=false;
opal_hwloc_locality_t locality;
pmix_cb_t *cb;
uint32_t i, myrank;
opal_process_name_t id;
char *cpuset;
opal_buffer_t buf, buf2;
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s pmix:native get_attr called",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME));
/* try to retrieve the requested value from the dstore */
OBJ_CONSTRUCT(&vals, opal_list_t);
if (OPAL_SUCCESS == opal_dstore.fetch(opal_dstore_internal, &OPAL_PROC_MY_NAME, attr, &vals)) {
*kv = (opal_value_t*)opal_list_remove_first(&vals);
OPAL_LIST_DESTRUCT(&vals);
return true;
}
if (NULL == mca_pmix_native_component.uri) {
/* no server available, so just return */
return false;
}
/* if the value isn't yet available, then we should try to retrieve
* all the available attributes and store them for future use */
msg = OBJ_NEW(opal_buffer_t);
/* pack the cmd */
if (OPAL_SUCCESS != (rc = opal_dss.pack(msg, &cmd, 1, PMIX_CMD_T))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(msg);
return false;
}
/* create a callback object as we need to pass it to the
* recv routine so we know which callback to use when
* the return message is recvd */
cb = OBJ_NEW(pmix_cb_t);
cb->active = true;
/* push the message into our event base to send to the server */
PMIX_ACTIVATE_SEND_RECV(msg, wait_cbfunc, cb);
/* wait for the data to return */
PMIX_WAIT_FOR_COMPLETION(cb->active);
/* we have received the entire data blob for this process - unpack
* and cache all values, keeping the one we requested to return
* to the caller */
cnt = 1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&cb->data, &ret, &cnt, OPAL_INT))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(cb);
return false;
}
if (OPAL_SUCCESS == ret) {
/* unpack the buffer containing the values */
cnt = 1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&cb->data, &bptr, &cnt, OPAL_BUFFER))) {
OPAL_ERROR_LOG(rc);
OBJ_RELEASE(cb);
return false;
}
cnt = 1;
while (OPAL_SUCCESS == (rc = opal_dss.unpack(bptr, &kp, &cnt, OPAL_VALUE))) {
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s unpacked attr %s",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME), kp->key);
/* if this is the local topology, we need to save it in a special way */
#if OPAL_HAVE_HWLOC
{
hwloc_topology_t topo;
if (0 == strcmp(PMIX_LOCAL_TOPO, kp->key)) {
opal_output_verbose(2, opal_pmix_base_framework.framework_output,
"%s saving topology",
OPAL_NAME_PRINT(OPAL_PROC_MY_NAME));
/* transfer the byte object for unpacking */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
opal_dss.load(&buf, kp->data.bo.bytes, kp->data.bo.size);
kp->data.bo.bytes = NULL; // protect the data region
kp->data.bo.size = 0;
OBJ_RELEASE(kp);
/* extract the topology */
cnt=1;
if (OPAL_SUCCESS != (rc = opal_dss.unpack(&buf, &topo, &cnt, OPAL_HWLOC_TOPO))) {
OPAL_ERROR_LOG(rc);
OBJ_DESTRUCT(&buf);
continue;
}
OBJ_DESTRUCT(&buf);
//......... some code omitted here .........
Example 12: npernode
/* place specified #procs on each node, up to the specified total
* number of procs (if one was given).
*/
static int npernode(orte_job_t *jdata)
{
orte_app_context_t *app;
int i, j, rc=ORTE_SUCCESS;
opal_list_t node_list;
opal_list_item_t *item;
orte_std_cntr_t num_slots;
orte_node_t *node;
int np, nprocs;
int num_nodes;
/* setup the node list */
OBJ_CONSTRUCT(&node_list, opal_list_t);
/* loop through the app_contexts */
for(i=0; i < jdata->apps->size; i++) {
if (NULL == (app = (orte_app_context_t*)opal_pointer_array_get_item(jdata->apps, i))) {
continue;
}
/* use the number of procs if one was given */
if (0 < app->num_procs) {
np = app->num_procs;
} else {
np = INT_MAX;
}
/* for each app_context, we have to get the list of nodes that it can
* use since that can now be modified with a hostfile and/or -host
* option
*/
if(ORTE_SUCCESS != (rc = orte_rmaps_base_get_target_nodes(&node_list, &num_slots, app,
jdata->map->policy))) {
ORTE_ERROR_LOG(rc);
goto error;
}
/* loop through the list of nodes */
num_nodes = opal_list_get_size(&node_list);
nprocs = 0;
while (NULL != (item = opal_list_remove_first(&node_list))) {
node = (orte_node_t*)item;
/* put the specified number of procs on each node */
for (j=0; j < jdata->map->npernode && nprocs < np; j++) {
if (ORTE_SUCCESS != (rc = orte_rmaps_base_claim_slot(jdata, node,
jdata->map->cpus_per_rank, app->idx,
&node_list, jdata->map->oversubscribe,
false, NULL))) {
/** if the code is ORTE_ERR_NODE_FULLY_USED, and we still have
* more procs to place, then that is an error
*/
if (ORTE_ERR_NODE_FULLY_USED != rc ||
j < jdata->map->npernode-1) {
ORTE_ERROR_LOG(rc);
OBJ_RELEASE(node);
goto error;
}
}
nprocs++;
}
OBJ_RELEASE(node);
}
/* update the number of procs in the job */
jdata->num_procs += nprocs;
/* if the user requested a specific number of procs and
* the total number of procs we were able to assign
* doesn't equal the number requested, then we have a
* problem
*/
if (0 < app->num_procs && nprocs < app->num_procs) {
orte_show_help("help-orte-rmaps-base.txt", "rmaps:too-many-procs", true,
app->app, app->num_procs,
"number of nodes", num_nodes,
"npernode", jdata->map->npernode);
return ORTE_ERR_SILENT;
}
/* compute vpids and add proc objects to the job - this has to be
* done after each app_context is mapped in order to keep the
* vpids contiguous within an app_context
*/
if (ORTE_SUCCESS != (rc = orte_rmaps_base_compute_vpids(jdata))) {
ORTE_ERROR_LOG(rc);
return rc;
}
}
error:
while (NULL != (item = opal_list_remove_first(&node_list))) {
OBJ_RELEASE(item);
}
OBJ_DESTRUCT(&node_list);
return rc;
}
Example 13: rte_init
static int rte_init(char flags)
{
int ret;
char *error = NULL;
orte_jmap_t *jmap;
/* run the prolog */
if (ORTE_SUCCESS != (ret = orte_ess_base_std_prolog())) {
error = "orte_ess_base_std_prolog";
goto error;
}
/* Start by getting a unique name */
slurm_set_name();
/* if I am a daemon, complete my setup using the
* default procedure
*/
if (orte_process_info.daemon) {
if (ORTE_SUCCESS != (ret = orte_ess_base_orted_setup())) {
ORTE_ERROR_LOG(ret);
error = "orte_ess_base_orted_setup";
goto error;
}
} else if (orte_process_info.tool) {
/* otherwise, if I am a tool proc, use that procedure */
if (ORTE_SUCCESS != (ret = orte_ess_base_tool_setup())) {
ORTE_ERROR_LOG(ret);
error = "orte_ess_base_tool_setup";
goto error;
}
} else {
/* otherwise, I must be an application process - use
* the default procedure to finish my setup
*/
if (ORTE_SUCCESS != (ret = orte_ess_base_app_setup())) {
ORTE_ERROR_LOG(ret);
error = "orte_ess_base_app_setup";
goto error;
}
/* setup the nidmap arrays */
OBJ_CONSTRUCT(&nidmap, opal_pointer_array_t);
opal_pointer_array_init(&nidmap, 8, INT32_MAX, 8);
/* setup array of jmaps */
OBJ_CONSTRUCT(&jobmap, opal_pointer_array_t);
opal_pointer_array_init(&jobmap, 1, INT32_MAX, 1);
jmap = OBJ_NEW(orte_jmap_t);
jmap->job = ORTE_PROC_MY_NAME->jobid;
opal_pointer_array_add(&jobmap, jmap);
/* if one was provided, build my nidmap */
if (ORTE_SUCCESS != (ret = orte_ess_base_build_nidmap(orte_process_info.sync_buf,
&nidmap, &jmap->pmap, &nprocs))) {
ORTE_ERROR_LOG(ret);
error = "orte_ess_base_build_nidmap";
goto error;
}
}
return ORTE_SUCCESS;
error:
orte_show_help("help-orte-runtime.txt",
"orte_init:startup:internal-failure",
true, error, ORTE_ERROR_NAME(ret), ret);
return ret;
}
Example 14: proc_data_construct
static void proc_data_construct(opal_dstore_proc_data_t *ptr)
{
ptr->loaded = false;
OBJ_CONSTRUCT(&ptr->data, opal_list_t);
}
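A constructor that builds member objects implies a destructor that tears them down again. A hedged sketch of the destructor that would typically pair with the above (the function name and the assumption that the stored items are opal_value_t's are illustrative):

static void proc_data_destruct(opal_dstore_proc_data_t *ptr)
{
    opal_value_t *kv;
    /* drain and release anything still stored on the list */
    while (NULL != (kv = (opal_value_t*)opal_list_remove_first(&ptr->data))) {
        OBJ_RELEASE(kv);
    }
    OBJ_DESTRUCT(&ptr->data);
}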
Example 15: main
int
main(int argc, char *argv[]){
int count;
int msgsize;
uint8_t *msg;
int i, j, rc;
orte_process_name_t peer;
double maxpower;
/*
* Init
*/
orte_init(&argc, &argv, ORTE_PROC_NON_MPI);
if (argc > 1) {
count = atoi(argv[1]);
if (count < 0) {
count = INT_MAX-1;
}
} else {
count = MAX_COUNT;
}
peer.jobid = ORTE_PROC_MY_NAME->jobid;
for (j=1; j < count+1; j++) {
peer.vpid = (ORTE_PROC_MY_NAME->vpid + j) % orte_process_info.num_procs;
ORTE_EPOCH_SET(peer.epoch,orte_ess.proc_get_epoch(&peer));
/* rank0 starts ring */
if (ORTE_PROC_MY_NAME->vpid == 0) {
/* setup the initiating buffer - put random sized message in it */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
maxpower = (double)(j%7);
msgsize = (int)pow(10.0, maxpower);
opal_output(0, "Ring %d message size %d bytes", j, msgsize);
msg = (uint8_t*)malloc(msgsize);
opal_dss.pack(&buf, msg, msgsize, OPAL_BYTE);
free(msg);  /* opal_dss.pack copies the bytes, so the scratch buffer can be freed */
if (0 > (rc = orte_rml.send_buffer(&peer,&buf, MY_TAG, 0))) {
opal_output(0, "error sending to %s %s\n", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&peer), ORTE_ERROR_NAME(rc));
exit(1);
}
OBJ_DESTRUCT(&buf);
/* wait for it to come around */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
msg_recvd = false;
orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
ORTE_RML_NON_PERSISTENT, recv_ack, NULL);
ORTE_PROGRESSED_WAIT(msg_recvd, 0, 1);
opal_output(0, "%s Ring %d completed", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), j);
} else {
/* wait for msg */
OBJ_CONSTRUCT(&buf, opal_buffer_t);
msg_recvd = false;
orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
ORTE_RML_NON_PERSISTENT, recv_ack, NULL);
ORTE_PROGRESSED_WAIT(msg_recvd, 0, 1);
/* send it along */
if (0 > (rc = orte_rml.send_buffer(&peer, &buf, MY_TAG, 0))) {
opal_output(0, "%s error sending to %s %s\n", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
ORTE_NAME_PRINT(&peer), ORTE_ERROR_NAME(rc));
exit(1);
}
OBJ_DESTRUCT(&buf);
}
}
orte_finalize();
return 0;
}