本文整理汇总了C++中opal_pointer_array_add函数的典型用法代码示例。如果您正苦于以下问题:C++ opal_pointer_array_add函数的具体用法?C++ opal_pointer_array_add怎么用?C++ opal_pointer_array_add使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了opal_pointer_array_add函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: orte_util_setup_local_nidmap_entries
/*
 * Populate the local nidmap/jobmap with entries describing this process
 * and the node it runs on (used when no full nidmap was delivered).
 *
 * Returns ORTE_SUCCESS, or the negative error code from
 * opal_pointer_array_add() if an entry could not be stored.
 */
int orte_util_setup_local_nidmap_entries(void)
{
    orte_nid_t *node;
    orte_jmap_t *jmap;
    orte_pmap_t *pmap;
    int rc;

    /* add a jmap entry for myself */
    jmap = OBJ_NEW(orte_jmap_t);
    jmap->job = ORTE_PROC_MY_NAME->jobid;
    /* opal_pointer_array_add returns the index on success, negative on error;
     * the original code ignored this - check it as the other callers in this
     * codebase do */
    if (0 > (rc = opal_pointer_array_add(&orte_jobmap, jmap))) {
        ORTE_ERROR_LOG(rc);
        OBJ_RELEASE(jmap);  /* not stored, so we still own it */
        return rc;
    }
    jmap->num_procs = 1;

    /* create a nidmap entry for this node */
    node = OBJ_NEW(orte_nid_t);
    node->name = strdup(orte_process_info.nodename);
    node->daemon = ORTE_PROC_MY_DAEMON->vpid;
    pmap = OBJ_NEW(orte_pmap_t);
    pmap->local_rank = 0;
    pmap->node_rank = 0;
    node->index = opal_pointer_array_add(&orte_nidmap, node);
    if (0 > node->index) {
        rc = node->index;
        ORTE_ERROR_LOG(rc);
        OBJ_RELEASE(pmap);
        OBJ_RELEASE(node);
        return rc;
    }
    /* value array copies values, so everything must be set before
     * calling the set_item function
     */
    pmap->node = node->index;
    opal_pointer_array_set_item(&jmap->pmap, ORTE_PROC_MY_NAME->vpid, pmap);

    /* all done */
    return ORTE_SUCCESS;
}
示例2: mca_spml_base_open
/**
 * Find and open either all available SPML MCA components, or only the one
 * that was specifically requested via the "spml" MCA parameter.
 *
 * @param flags  flags forwarded to mca_base_framework_components_open()
 * @return OSHMEM_SUCCESS on success, OSHMEM_ERROR if the framework's
 *         components could not be opened.
 */
static int mca_spml_base_open(mca_base_open_flag_t flags)
{
    /**
     * Construct the send and receive request queues. There are 2 reasons to do it
     * here. First, as they are globals it's better to construct them in one common
     * place. Second, in order to be able to allow the external debuggers to show
     * their content, they should get constructed as soon as possible once the MPI
     * process is started.
     */
    OBJ_CONSTRUCT(&mca_spml_base_put_requests, ompi_free_list_t);
    OBJ_CONSTRUCT(&mca_spml_base_get_requests, ompi_free_list_t);
    OBJ_CONSTRUCT(&mca_spml_base_spml, opal_pointer_array_t);

    /* Open up all available components */
    if (OPAL_SUCCESS !=
        mca_base_framework_components_open(&oshmem_spml_base_framework, flags)) {
        return OSHMEM_ERROR;
    }

    /* Set a sentinel in case we don't select any components (e.g.,
       ompi_info) */
    mca_spml_base_selected_component.spmlm_finalize = NULL;

    /**
     * Right now our selection of BTLs is completely broken. If we have
     * multiple SPMLs that use BTLs than we will open all BTLs several times, leading to
     * undefined behaviors. The simplest solution, at least until we
     * figure out the correct way to do it, is to force a default SPML that
     * uses BTLs and any other SPMLs that do not in the mca_spml_base_spml array.
     */
#if MCA_ompi_pml_DIRECT_CALL
    /* NOTE(review): the guard tests MCA_ompi_pml_DIRECT_CALL while the name
     * added uses MCA_oshmem_spml_DIRECT_CALL_COMPONENT -- looks inconsistent;
     * confirm against the build system's direct-call macro definitions. */
    opal_pointer_array_add(&mca_spml_base_spml,
                           strdup(stringify(MCA_oshmem_spml_DIRECT_CALL_COMPONENT)));
#else
    {
        const char **default_spml = NULL;
        int var_id;

        /* read the user's "oshmem_spml" MCA variable selection, if any */
        var_id = mca_base_var_find("oshmem", "spml", NULL, NULL);
        mca_base_var_get_value(var_id, &default_spml, NULL, NULL);
        /* if there was no explicit selection (unset, empty, or a negated
         * "^name" selection), queue the built-in default candidates */
        if( (NULL == default_spml || NULL == default_spml[0] ||
             0 == strlen(default_spml[0])) || (default_spml[0][0] == '^') ) {
#ifdef OSHMEM_HAS_IKRIT
            opal_pointer_array_add(&mca_spml_base_spml, strdup("ikrit"));
#endif
            opal_pointer_array_add(&mca_spml_base_spml, strdup("yoda"));
        } else {
            /* an explicit component was requested - make it the only candidate */
            opal_pointer_array_add(&mca_spml_base_spml, strdup(default_spml[0]));
        }
    }
#endif

    return OSHMEM_SUCCESS;
}
示例3: orte_rmaps_base_add_proc_to_map
/*
 * Record that proc has been mapped onto node, adding the node itself to the
 * job map first if this is the first proc placed on it.  Retains both the
 * node and the proc objects to keep the reference accounting correct.
 */
int orte_rmaps_base_add_proc_to_map(orte_job_map_t *map, orte_node_t *node,
                                    bool oversubscribed, orte_proc_t *proc)
{
    orte_std_cntr_t idx;
    orte_node_t *existing;
    bool node_in_map = false;
    int rc;

    /* scan the map's node array to see if this node is already present */
    for (idx = 0; idx < map->nodes->size; idx++) {
        existing = (orte_node_t*)opal_pointer_array_get_item(map->nodes, idx);
        if (NULL == existing) {
            continue;
        }
        if (existing->index == node->index) {
            node_in_map = true;
            break;
        }
    }

    if (!node_in_map) {
        /* first proc on this node - add the node pointer to the map */
        OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base.rmaps_output,
                             "%s rmaps:base: adding node %s to map",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                             (NULL == node->name) ? "NULL" : node->name));
        if (ORTE_SUCCESS > (rc = opal_pointer_array_add(map->nodes, (void*)node))) {
            ORTE_ERROR_LOG(rc);
            return rc;
        }
        OBJ_RETAIN(node); /* maintain accounting on object */
        ++map->num_nodes;
    }

    /* add the proc to this node's local processes - it is assumed
     * that the proc isn't already there as this would be an error
     * in the mapper
     */
    OPAL_OUTPUT_VERBOSE((5, orte_rmaps_base.rmaps_output,
                         "%s rmaps:base: mapping proc for job %s to node %s whose daemon is %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_JOBID_PRINT(proc->name.jobid),
                         (NULL == node->name) ? "NULL" : node->name,
                         (NULL == node->daemon) ? "NULL" : ORTE_NAME_PRINT(&(node->daemon->name))));
    if (0 > (rc = opal_pointer_array_add(node->procs, (void*)proc))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* retain the proc struct so that we correctly track its release */
    OBJ_RETAIN(proc);
    ++node->num_procs;

    /* update the oversubscribed state of the node */
    node->oversubscribed = oversubscribed;

    return ORTE_SUCCESS;
}
示例4: killprocs
/*
 * Kill local procs: a double wildcard kills every local proc, otherwise
 * only the single proc named by (job, vpid) is targeted.
 */
static void killprocs(orte_jobid_t job, orte_vpid_t vpid)
{
    opal_pointer_array_t targets;
    orte_proc_t victim;
    int rc;

    /* wildcard on both fields means "kill everything local" */
    if (ORTE_JOBID_WILDCARD == job
        && ORTE_VPID_WILDCARD == vpid) {
        rc = orte_odls.kill_local_procs(NULL);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
        }
        return;
    }

    /* otherwise build a one-entry target list naming the specific proc */
    OBJ_CONSTRUCT(&targets, opal_pointer_array_t);
    OBJ_CONSTRUCT(&victim, orte_proc_t);
    victim.name.jobid = job;
    victim.name.vpid = vpid;
    opal_pointer_array_add(&targets, &victim);

    rc = orte_odls.kill_local_procs(&targets);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
    }

    OBJ_DESTRUCT(&targets);
    OBJ_DESTRUCT(&victim);
}
示例5: mca_btl_ugni_init_ep
/*
 * Allocate and initialize a uGNI endpoint for peer_proc, register it in the
 * module's endpoint array, and return it through *ep.
 * Returns OPAL_SUCCESS, or the error code from the modex lookup.
 */
int mca_btl_ugni_init_ep (mca_btl_ugni_module_t *ugni_module, mca_btl_ugni_endpoint_t **ep,
                          mca_btl_ugni_module_t *btl, opal_proc_t *peer_proc)
{
    mca_btl_ugni_endpoint_t *new_ep;
    int rc;

    new_ep = OBJ_NEW(mca_btl_ugni_endpoint_t);
    assert (new_ep != NULL);

    new_ep->smsg_progressing = 0;
    new_ep->state = MCA_BTL_UGNI_EP_STATE_INIT;
    new_ep->peer_proc = peer_proc;

    /* get the modex info for this endpoint and setup a ugni endpoint. this call may lead
     * to re-entry through opal_progress(). */
    rc = mca_btl_ugni_endpoint_get_modex (new_ep);
    if (OPAL_SUCCESS != rc) {
        assert (0);
        return rc;
    }

    /* record the endpoint in the module's pointer array and hand it back */
    new_ep->index = opal_pointer_array_add (&ugni_module->endpoints, new_ep);
    *ep = new_ep;

    return OPAL_SUCCESS;
}
示例6: orte_debugger_init_before_spawn
/**
 * Initialization of data structures for running under a debugger
 * using the MPICH/TotalView parallel debugger interface. Before the
 * spawn we need to check if we are being run under a TotalView-like
 * debugger; if so then inform applications via an MCA parameter.
 *
 * @param jdata  the job being launched; its app contexts get the
 *               in_parallel_debugger env var set when debugging.
 */
void orte_debugger_init_before_spawn(orte_job_t *jdata)
{
    char *env_name;
    orte_app_context_t **apps, *app;
    orte_std_cntr_t i;
    int32_t ljob;

    if (!MPIR_being_debugged && !orte_in_parallel_debugger) {
        /* not being debugged - check if we want to enable
         * later attachment by debugger
         */
        if (orte_enable_debug_cospawn_while_running) {
            /* setup a timer to wake us up periodically
             * to check for debugger attach
             */
            ORTE_TIMER_EVENT(orte_debugger_check_rate, 0, check_debugger);
        }
        return;
    }

    if (orte_debug_flag) {
        opal_output(0, "Info: Spawned by a debugger");
    }

    /* tell the procs they are being debugged by exporting an MCA
     * parameter into each app context's environment */
    apps = (orte_app_context_t**)jdata->apps->addr;
    env_name = mca_base_param_environ_variable("orte",
                                               "in_parallel_debugger", NULL);
    for (i=0; i < jdata->num_apps; i++) {
        opal_setenv(env_name, "1", true, &apps[i]->env);
    }
    free(env_name);

    /* check if we need to co-spawn the debugger daemons */
    if ('\0' != MPIR_executable_path[0]) {
        /* add debugger info to launch message */
        orte_debugger_daemon = OBJ_NEW(orte_job_t);
        /* create a jobid for these daemons - this is done solely
         * to avoid confusing the rest of the system's bookkeeping
         */
        orte_plm_base_create_jobid(orte_debugger_daemon);
        /* flag the job as being debugger daemons */
        orte_debugger_daemon->controls |= ORTE_JOB_CONTROL_DEBUGGER_DAEMON;
        /* unless directed, we do not forward output */
        if (!MPIR_forward_output) {
            orte_debugger_daemon->controls &= ~ORTE_JOB_CONTROL_FORWARD_OUTPUT;
        }
        /* add it to the global job pool, keyed by its local jobid */
        ljob = ORTE_LOCAL_JOBID(orte_debugger_daemon->jobid);
        opal_pointer_array_set_item(orte_job_data, ljob, orte_debugger_daemon);
        /* create an app_context for the debugger daemon, executing the
         * path published through the MPIR interface */
        app = OBJ_NEW(orte_app_context_t);
        app->app = strdup((char*)MPIR_executable_path);
        opal_argv_append_nosize(&app->argv, app->app);
        build_debugger_args(app);
        opal_pointer_array_add(orte_debugger_daemon->apps, &app->super);
        orte_debugger_daemon->num_apps = 1;
    }
}
示例7: orte_rmaps_base_setup_proc
/*
 * Create a new proc object for the given job/app, bind it to the given
 * node, and append it to the node's proc array.
 * Returns the new proc (caller's reference), or NULL on failure.
 */
orte_proc_t* orte_rmaps_base_setup_proc(orte_job_t *jdata,
                                        orte_node_t *node,
                                        orte_app_idx_t idx)
{
    orte_proc_t *newproc;
    int rc;

    newproc = OBJ_NEW(orte_proc_t);

    /* proc belongs to this job and is ready for launch */
    newproc->name.jobid = jdata->jobid;
    newproc->state = ORTE_PROC_STATE_INIT;
    newproc->app_idx = idx;

    /* bind the proc to its node */
    OBJ_RETAIN(node); /* maintain accounting on object */
    newproc->node = node;
    newproc->nodename = node->name;
    node->num_procs++;
    if (node->slots_inuse < node->slots) {
        node->slots_inuse += orte_rmaps_base.cpus_per_rank;
    }

    rc = opal_pointer_array_add(node->procs, (void*)newproc);
    if (0 > rc) {
        ORTE_ERROR_LOG(rc);
        OBJ_RELEASE(newproc);
        return NULL;
    }
    /* retain the proc struct so that we correctly track its release */
    OBJ_RETAIN(newproc);

    return newproc;
}
示例8: killprocs
/*
 * Kill local procs: stops sensors when an entire job is targeted, kills
 * everything on a double wildcard, otherwise kills the single proc named
 * by (job, vpid).
 */
static void killprocs(orte_jobid_t job, orte_vpid_t vpid)
{
    opal_pointer_array_t targets;
    orte_proc_t victim;
    int rc;

    /* stop local sensors for this job */
    if (ORTE_VPID_WILDCARD == vpid) {
        orte_sensor.stop(job);
    }

    /* wildcard on both fields means "kill everything local" */
    if (ORTE_JOBID_WILDCARD == job
        && ORTE_VPID_WILDCARD == vpid) {
        rc = orte_odls.kill_local_procs(NULL);
        if (ORTE_SUCCESS != rc) {
            ORTE_ERROR_LOG(rc);
        }
        return;
    }

    /* otherwise build a one-entry target list naming the specific proc */
    OBJ_CONSTRUCT(&targets, opal_pointer_array_t);
    OBJ_CONSTRUCT(&victim, orte_proc_t);
    victim.name.jobid = job;
    victim.name.vpid = vpid;
    ORTE_EPOCH_SET(victim.name.epoch,orte_ess.proc_get_epoch(&(victim.name)));
    opal_pointer_array_add(&targets, &victim);

    rc = orte_odls.kill_local_procs(&targets);
    if (ORTE_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
    }

    OBJ_DESTRUCT(&targets);
    OBJ_DESTRUCT(&victim);
}
示例9: mca_pml_monitoring_component_open
/*
 * Component open: when monitoring is enabled, queue this PML's component
 * name as a selection candidate in the base PML list.
 */
static int mca_pml_monitoring_component_open(void)
{
    if( !mca_pml_monitoring_enabled ) {
        return OMPI_SUCCESS;
    }

    opal_pointer_array_add(&mca_pml_base_pml,
                           strdup(mca_pml_monitoring_component.pmlm_version.mca_component_name));
    return OMPI_SUCCESS;
}
示例10: ompi_op_construct
/*
 * Op constructor: registers the new op in the fortran <-> c translation
 * table and records its index on the object.
 */
static void ompi_op_construct(ompi_op_t *new_op)
{
    /* the index returned by the add is the op's fortran handle */
    new_op->o_f_to_c_index =
        opal_pointer_array_add(ompi_op_f_to_c_table, new_op);
}
示例11: check_installed
/*
 * Reconcile the installed-application array against the configured-run
 * array, launching any configured instance whose application has now
 * become available (or has been modified).
 *
 * @param check_all  when true, every installed app is examined; when
 *                   false, only apps whose 'modified' flag is set.
 */
static void check_installed(bool check_all)
{
    int i, n;
    orcm_cfgi_app_t *app;
    orcm_cfgi_run_t *run;

    /* run a check of the installed apps against
     * the configured apps so we can start anything that was awaiting
     * installation
     */
    for (i=0; i < orcm_cfgi_base.installed_apps.size; i++) {
        /* pointer arrays may be sparse - skip empty slots */
        if (NULL == (app = (orcm_cfgi_app_t*)opal_pointer_array_get_item(&orcm_cfgi_base.installed_apps, i))) {
            continue;
        }
        if (!check_all && !app->modified) {
            OPAL_OUTPUT_VERBOSE((2, orcm_cfgi_base.output,
                                 "APP %s HAS NOT BEEN MODIFIED",
                                 app->application));
            continue;
        }
        OPAL_OUTPUT_VERBOSE((2, orcm_cfgi_base.output,
                             "CHECKING INSTALL-RUNNING CONFIG FOR APP %s", app->application));
        /* reset the flag so we don't re-check this app next time */
        app->modified = false;
        /* search the configuration array for instances of this app */
        for (n=0; n < orcm_cfgi_base.confgd_apps.size; n++) {
            if (NULL == (run = (orcm_cfgi_run_t*)opal_pointer_array_get_item(&orcm_cfgi_base.confgd_apps, n))) {
                continue;
            }
            if (NULL == run->app) {
                /* still waiting for app to be defined - is this it? */
                if (0 == strcmp(run->application, app->application)) {
                    /* yep - see if we can run it */
                    if (0 <= app->max_instances && app->max_instances <= app->num_instances) {
                        /* at our limit - can't run at this time */
                        continue;
                    }
                    /* add this instance, remembering its slot in the
                     * app's instance array */
                    run->app = app;
                    run->app_idx = opal_pointer_array_add(&app->instances, run);
                    app->num_instances++;
                    link_launch(app, run, check_all);
                }
            } else if (0 == strcmp(run->application, app->application)) {
                OPAL_OUTPUT_VERBOSE((2, orcm_cfgi_base.output,
                                     "%s EXISTING INSTANCE %s:%s CAN BE LAUNCHED",
                                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                                     run->application, run->instance));
                /* check to see what has changed, and launch it if reqd */
                link_launch(app, run, check_all);
            }
        }
    }
}
示例12: _terminate_job
/*
 * Ask the PLM to terminate every process in the given job by handing it a
 * single wildcard-vpid proc entry.
 */
static void _terminate_job(orte_jobid_t jobid)
{
    opal_pointer_array_t targets;
    orte_proc_t wildcard;

    /* build a one-entry list that names all procs in the job */
    OBJ_CONSTRUCT(&targets, opal_pointer_array_t);
    opal_pointer_array_init(&targets, 1, 1, 1);
    OBJ_CONSTRUCT(&wildcard, orte_proc_t);
    wildcard.name.jobid = jobid;
    wildcard.name.vpid = ORTE_VPID_WILDCARD;
    opal_pointer_array_add(&targets, &wildcard);

    orte_plm.terminate_procs(&targets);

    OBJ_DESTRUCT(&targets);
    OBJ_DESTRUCT(&wildcard);
}
示例13: ompi_group_construct
/*
 * Group constructor: registers the group in the fortran <-> c table and
 * zeroes the sparse-group fields.
 */
static void ompi_group_construct(ompi_group_t *new_group)
{
    /* Note that we do *NOT* increase the refcount on all the included
       procs here because that is handled at a different level (e.g.,
       the proc counts are not decreased during the destructor,
       either). */

    /* the index returned by the add is the group's fortran handle */
    new_group->grp_f_to_c_index =
        opal_pointer_array_add(&ompi_group_f_to_c_table, new_group);

    new_group->grp_flags = 0;

    /* default the sparse values for groups */
    new_group->grp_parent_group_ptr = NULL;
}
示例14: file_constructor
/*
 * File-handle constructor: zeroes the MPI_FILE_OPEN parameters, registers
 * the handle in the fortran <-> c table, installs the default error
 * handler, and clears the I/O-module selection.
 */
static void file_constructor(ompi_file_t *file)
{
    /* Initialize the MPI_FILE_OPEN params */
    file->f_comm = NULL;
    file->f_filename = NULL;
    file->f_amode = 0;
    file->f_info = NULL;

    /* Initialize flags */
    file->f_flags = 0;

    /* Initialize the fortran <--> C translation index */
    file->f_f_to_c_index = opal_pointer_array_add(&ompi_file_f_to_c_table,
                                                  file);

    /* Initialize the error handler. Per MPI-2:9.7 (p265), the
       default error handler on file handles is the error handler on
       MPI_FILE_NULL, which starts out as MPI_ERRORS_RETURN (but can
       be changed by invoking MPI_FILE_SET_ERRHANDLER on
       MPI_FILE_NULL).  MPI_FILE_NULL itself bootstraps directly from
       MPI_ERRORS_RETURN. */
    file->errhandler_type = OMPI_ERRHANDLER_TYPE_FILE;
    file->error_handler = (file == &ompi_mpi_file_null.file)
                              ? &ompi_mpi_errors_return.eh
                              : ompi_mpi_file_null.file.error_handler;
    OBJ_RETAIN(file->error_handler);

    /* Initialize the module */
    file->f_io_version = MCA_IO_BASE_V_NONE;
    memset(&(file->f_io_selected_module), 0,
           sizeof(file->f_io_selected_module));
    file->f_io_selected_data = NULL;

    /* If the user doesn't want us to ever free it, then add an extra
       RETAIN here */
    if (ompi_debug_no_free_handles) {
        OBJ_RETAIN(&(file->super));
    }
}
示例15: ompi_errhandler_construct
/**
 * Errhandler constructor: registers the handler in the fortran <-> c
 * table, marks it as a C-language handler, and clears every callback
 * slot and the name buffer.
 */
static void ompi_errhandler_construct(ompi_errhandler_t *new_errhandler)
{
    /* the index returned by the add is the handler's fortran handle */
    new_errhandler->eh_f_to_c_index =
        opal_pointer_array_add(&ompi_errhandler_f_to_c_table,
                               new_errhandler);

    new_errhandler->eh_lang = OMPI_ERRHANDLER_LANG_C;

    /* no callbacks installed yet */
    new_errhandler->eh_comm_fn = NULL;
    new_errhandler->eh_win_fn = NULL;
    new_errhandler->eh_file_fn = NULL;
    new_errhandler->eh_fort_fn = NULL;
    new_errhandler->eh_cxx_dispatch_fn = NULL;

    memset (new_errhandler->eh_name, 0, MPI_MAX_OBJECT_NAME);
}