This article collects typical usage examples of the OBJ_DESTRUCT function in C++. If you have been struggling with questions like: what exactly does OBJ_DESTRUCT do, how do I use it, and where can I find examples of it in use — then the hand-picked code examples below should help.
This page shows 15 code examples of the OBJ_DESTRUCT function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
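Before the examples, it may help to see the object-lifecycle convention that OBJ_DESTRUCT belongs to. These samples come from the Open MPI code base, whose OPAL class system pairs OBJ_CONSTRUCT/OBJ_DESTRUCT for objects whose storage the caller owns (on the stack or embedded in a larger struct), and OBJ_NEW/OBJ_RELEASE for heap-allocated, reference-counted objects. The following is a minimal sketch of that pattern, not taken from any single example below; the function name and header path are illustrative:

#include "opal/class/opal_list.h"

static void lifecycle_sketch(void)
{
    opal_list_t list;
    opal_list_item_t *item;

    /* stack-owned object: construct in place, refcount starts at 1 */
    OBJ_CONSTRUCT(&list, opal_list_t);

    /* heap object: allocated and constructed, refcount starts at 1 */
    item = OBJ_NEW(opal_list_item_t);
    opal_list_append(&list, item);

    /* release each item first - OBJ_RELEASE frees an item once its
     * refcount drops to zero - then destruct the list itself */
    while (NULL != (item = opal_list_remove_first(&list))) {
        OBJ_RELEASE(item);
    }

    /* run the destructor chain; does NOT free the stack storage */
    OBJ_DESTRUCT(&list);
}

This release-items-then-destruct-the-container teardown is exactly the shape that recurs throughout the examples below.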
Example 1: mca_oob_ud_module_fini
static void mca_oob_ud_module_fini (mca_oob_ud_peer_t **peer)
{
    opal_process_name_t key;
    void *node1, *node2;
    int rc;

    rc = opal_proc_table_get_first_key (&mca_oob_ud_module.peers, &key,
                                        (void **) peer, &node1, &node2);
    if (OPAL_SUCCESS == rc) {
        do {
            if (NULL != *peer) {
                mca_oob_ud_peer_release (*peer);
            }
            rc = opal_proc_table_get_next_key (&mca_oob_ud_module.peers, &key,
                                               (void **) peer, node1, &node1,
                                               node2, &node2);
        } while (OPAL_SUCCESS == rc);
    }

    opal_proc_table_remove_all(&mca_oob_ud_module.peers);
    OBJ_DESTRUCT(&mca_oob_ud_module.peers);

    return;
}
Example 2: xcast_recv
//......... portions of this code omitted .........
                    ORTE_ERROR_LOG(ret);
                    goto relay;
                }
                if (ORTE_DAEMON_ADD_LOCAL_PROCS == command) {
                    OBJ_RELEASE(relay);
                    relay = OBJ_NEW(opal_buffer_t);
                    /* repack the command */
                    if (OPAL_SUCCESS != (ret = opal_dss.pack(relay, &command, 1, ORTE_DAEMON_CMD))) {
                        ORTE_ERROR_LOG(ret);
                        goto relay;
                    }
                    if (0 == flag) {
                        /* copy the remainder of the payload */
                        opal_dss.copy_payload(relay, buffer);
                        /* no - just return */
                        goto relay;
                    }
                }
                /* unpack the byte object */
                cnt = 1;
                if (ORTE_SUCCESS != (ret = opal_dss.unpack(buffer, &bo, &cnt, OPAL_BYTE_OBJECT))) {
                    ORTE_ERROR_LOG(ret);
                    goto relay;
                }
                if (0 < bo->size) {
                    /* load it into a buffer */
                    OBJ_CONSTRUCT(&wireup, opal_buffer_t);
                    opal_dss.load(&wireup, bo->bytes, bo->size);
                    /* pass it for processing */
                    if (ORTE_SUCCESS != (ret = orte_routed.init_routes(ORTE_PROC_MY_NAME->jobid, &wireup))) {
                        ORTE_ERROR_LOG(ret);
                        OBJ_DESTRUCT(&wireup);
                        goto relay;
                    }
                    /* done with the wireup buffer - dump it */
                    OBJ_DESTRUCT(&wireup);
                }
                free(bo);
                if (ORTE_DAEMON_ADD_LOCAL_PROCS == command) {
                    /* copy the remainder of the payload */
                    opal_dss.copy_payload(relay, buffer);
                }
            }
        } else {
            ORTE_ERROR_LOG(ret);
            goto CLEANUP;
        }
    }

relay:
    /* get the list of next recipients from the routed module */
    orte_routed.get_routing_list(&coll);

    /* if list is empty, no relay is required */
    if (opal_list_is_empty(&coll)) {
        OPAL_OUTPUT_VERBOSE((5, orte_grpcomm_base_framework.framework_output,
                             "%s grpcomm:direct:send_relay - recipient list is empty!",
                             ORTE_NAME_PRINT(ORTE_PROC_MY_NAME)));
        OBJ_RELEASE(rly);
        goto CLEANUP;
    }

    /* send the message to each recipient on list, deconstructing it as we go */
Example 3: ompi_mpi_finalize
//......... portions of this code omitted .........
        return ret;
    }

    /* wait for barrier to complete */
    OMPI_LAZY_WAIT_FOR_COMPLETION(coll->active);
    OBJ_RELEASE(coll);

    /* check for timing request - get stop time and report elapsed
       time if so */
    if (ompi_enable_timing && 0 == OMPI_PROC_MY_NAME->vpid) {
        gettimeofday(&ompistop, NULL);
        opal_output(0, "ompi_mpi_finalize[%ld]: time to execute barrier %ld usec",
                    (long)OMPI_PROC_MY_NAME->vpid,
                    (long int)((ompistop.tv_sec - ompistart.tv_sec)*1000000 +
                               (ompistop.tv_usec - ompistart.tv_usec)));
    }

    /*
     * Shutdown the Checkpoint/Restart Mech.
     */
    if (OMPI_SUCCESS != (ret = ompi_cr_finalize())) {
        OMPI_ERROR_LOG(ret);
    }

    /* Shut down any bindings-specific issues: C++, F77, F90 */

    /* Remove all memory associated by MPI_REGISTER_DATAREP (per
       MPI-2:9.5.3, there is no way for an MPI application to
       *un*register datareps, but we don't want the OMPI layer causing
       memory leaks). */
    while (NULL != (item = opal_list_remove_first(&ompi_registered_datareps))) {
        OBJ_RELEASE(item);
    }
    OBJ_DESTRUCT(&ompi_registered_datareps);

    /* Remove all F90 types from the hash tables. As the OBJ_DESTRUCT will
     * call a special destructor able to release predefined types, we can
     * simply call the OBJ_DESTRUCT on the hash table and all memory will
     * be correctly released.
     */
    OBJ_DESTRUCT( &ompi_mpi_f90_integer_hashtable );
    OBJ_DESTRUCT( &ompi_mpi_f90_real_hashtable );
    OBJ_DESTRUCT( &ompi_mpi_f90_complex_hashtable );

    /* Free communication objects */

    /* free file resources */
    if (OMPI_SUCCESS != (ret = ompi_file_finalize())) {
        return ret;
    }

    /* free window resources */
    if (OMPI_SUCCESS != (ret = ompi_win_finalize())) {
        return ret;
    }
    if (OMPI_SUCCESS != (ret = ompi_osc_base_finalize())) {
        return ret;
    }

    /* free pml resource */
    if (OMPI_SUCCESS != (ret = mca_pml_base_finalize())) {
        return ret;
    }

    /* free communicator resources */
    if (OMPI_SUCCESS != (ret = ompi_comm_finalize())) {
        return ret;
//......... portions of this code omitted .........
Example 4: des
static void des(ompi_orte_tracker_t *p)
{
    OBJ_DESTRUCT(&p->lock);
    OBJ_DESTRUCT(&p->cond);
}
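Example 4 is a class destructor: in OPAL's class system a destructor like this is registered once with OBJ_CLASS_INSTANCE, after which OBJ_DESTRUCT (and OBJ_RELEASE, on the last reference) invokes it automatically, chaining up through the parent class. Below is a minimal sketch of that wiring, assuming a hypothetical tracker type with a lock and condition variable; the type name, helper names, and header paths are illustrative, not from the example above:

#include "opal/class/opal_object.h"
#include "opal/threads/mutex.h"
#include "opal/threads/condition.h"

typedef struct {
    opal_object_t super;   /* must be first: holds refcount + class pointer */
    opal_mutex_t lock;
    opal_condition_t cond;
} my_tracker_t;

static void my_tracker_con(my_tracker_t *p)
{
    OBJ_CONSTRUCT(&p->lock, opal_mutex_t);
    OBJ_CONSTRUCT(&p->cond, opal_condition_t);
}

static void my_tracker_des(my_tracker_t *p)
{
    /* mirror the constructor: destruct every member object */
    OBJ_DESTRUCT(&p->lock);
    OBJ_DESTRUCT(&p->cond);
}

/* ties the constructor/destructor to the type; OBJ_CONSTRUCT/OBJ_DESTRUCT
 * and OBJ_NEW/OBJ_RELEASE on my_tracker_t will call them */
OBJ_CLASS_INSTANCE(my_tracker_t, opal_object_t, my_tracker_con, my_tracker_des);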
Example 5: s1_init
static int s1_init(void)
{
    PMI_BOOL initialized;
    int spawned;
    int rc, ret = OPAL_ERROR;
    int i, rank, lrank, nrank;
    char *pmix_id, tmp[64];
    opal_value_t kv;
    char *str;
    uint32_t ui32;
    opal_process_name_t ldr;
    char **localranks = NULL;

    if (PMI_SUCCESS != (rc = PMI_Initialized(&initialized))) {
        OPAL_PMI_ERROR(rc, "PMI_Initialized");
        return OPAL_ERROR;
    }

    if (PMI_TRUE != initialized && PMI_SUCCESS != (rc = PMI_Init(&spawned))) {
        OPAL_PMI_ERROR(rc, "PMI_Init");
        return OPAL_ERROR;
    }

    // setup hash table
    opal_pmix_base_hash_init();

    // Initialize space demands
    rc = PMI_KVS_Get_value_length_max(&pmix_vallen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_value_length_max");
        goto err_exit;
    }
    pmix_vallen_threshold = pmix_vallen_max * 3;
    pmix_vallen_threshold >>= 2;

    rc = PMI_KVS_Get_name_length_max(&pmix_kvslen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_name_length_max");
        goto err_exit;
    }

    rc = PMI_KVS_Get_key_length_max(&pmix_keylen_max);
    if (PMI_SUCCESS != rc) {
        OPAL_PMI_ERROR(rc, "PMI_KVS_Get_key_length_max");
        goto err_exit;
    }

    // Initialize job environment information
    pmix_id = (char*)malloc(pmix_vallen_max);
    if (pmix_id == NULL) {
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto err_exit;
    }

    /* Get domain id */
    if (PMI_SUCCESS != (rc = PMI_Get_kvs_domain_id(pmix_id, pmix_vallen_max))) {
        free(pmix_id);
        goto err_exit;
    }

    /* get our rank */
    ret = PMI_Get_rank(&rank);
    if (PMI_SUCCESS != ret) {
        OPAL_PMI_ERROR(ret, "PMI_Get_rank");
        free(pmix_id);
        goto err_exit;
    }

    /* Slurm PMI provides the job id as an integer followed
     * by a '.', followed by essentially a stepid. The first integer
     * defines an overall job number. The second integer is the number of
     * individual jobs we have run within that allocation. */
    s1_pname.jobid = strtoul(pmix_id, &str, 10);
    s1_pname.jobid = (s1_pname.jobid << 16) & 0xffff0000;
    if (NULL != str) {
        ui32 = strtoul(str, NULL, 10);
        s1_pname.jobid |= (ui32 & 0x0000ffff);
    }
    ldr.jobid = s1_pname.jobid;
    s1_pname.vpid = rank;

    /* store our name in the opal_proc_t so that
     * debug messages will make sense - an upper
     * layer will eventually overwrite it, but that
     * won't do any harm */
    opal_proc_set_name(&s1_pname);
    opal_output_verbose(2, opal_pmix_base_framework.framework_output,
                        "%s pmix:s1: assigned tmp name",
                        OPAL_NAME_PRINT(s1_pname));

    OBJ_CONSTRUCT(&kv, opal_value_t);
    kv.key = strdup(OPAL_PMIX_JOBID);
    kv.type = OPAL_UINT32;
    kv.data.uint32 = s1_pname.jobid;
    if (OPAL_SUCCESS != (ret = opal_pmix_base_store(&OPAL_PROC_MY_NAME, &kv))) {
        OPAL_ERROR_LOG(ret);
        OBJ_DESTRUCT(&kv);
        goto err_exit;
    }
    OBJ_DESTRUCT(&kv);

    /* save it */
//......... portions of this code omitted .........
Example 6: main
int
main(int argc, char *argv[])
{
    int count;
    int msgsize;
    uint8_t *msg;
    int i, j, rc;
    orte_process_name_t peer;
    double maxpower;
    opal_buffer_t *buf;
    orte_rml_recv_cb_t blob;

    /*
     * Init
     */
    orte_init(&argc, &argv, ORTE_PROC_NON_MPI);

    if (argc > 1) {
        count = atoi(argv[1]);
        if (count < 0) {
            count = INT_MAX - 1;
        }
    } else {
        count = MAX_COUNT;
    }

    peer.jobid = ORTE_PROC_MY_NAME->jobid;
    peer.vpid = ORTE_PROC_MY_NAME->vpid + 1;
    if (peer.vpid == orte_process_info.num_procs) {
        peer.vpid = 0;
    }

    for (j = 1; j < count + 1; j++) {
        /* rank0 starts ring */
        if (ORTE_PROC_MY_NAME->vpid == 0) {
            /* setup the initiating buffer - put random sized message in it */
            buf = OBJ_NEW(opal_buffer_t);
            maxpower = (double)(j % 7);
            msgsize = (int)pow(10.0, maxpower);
            opal_output(0, "Ring %d message size %d bytes", j, msgsize);
            msg = (uint8_t*)malloc(msgsize);
            opal_dss.pack(buf, msg, msgsize, OPAL_BYTE);
            free(msg);
            orte_rml.send_buffer_nb(&peer, buf, MY_TAG, orte_rml_send_callback, NULL);

            /* wait for it to come around */
            OBJ_CONSTRUCT(&blob, orte_rml_recv_cb_t);
            blob.active = true;
            orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
                                    ORTE_RML_NON_PERSISTENT,
                                    orte_rml_recv_callback, &blob);
            ORTE_WAIT_FOR_COMPLETION(blob.active);
            OBJ_DESTRUCT(&blob);
            opal_output(0, "%s Ring %d completed", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME), j);
        } else {
            /* wait for msg */
            OBJ_CONSTRUCT(&blob, orte_rml_recv_cb_t);
            blob.active = true;
            orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD, MY_TAG,
                                    ORTE_RML_NON_PERSISTENT,
                                    orte_rml_recv_callback, &blob);
            ORTE_WAIT_FOR_COMPLETION(blob.active);
            opal_output(0, "%s received message %d from %s", ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                        j, ORTE_NAME_PRINT(&blob.name));

            /* send it along */
            buf = OBJ_NEW(opal_buffer_t);
            opal_dss.copy_payload(buf, &blob.data);
            OBJ_DESTRUCT(&blob);
            msg_active = true;
            orte_rml.send_buffer_nb(&peer, buf, MY_TAG, send_callback, NULL);
            ORTE_WAIT_FOR_COMPLETION(msg_active);
        }
    }

    orte_finalize();
    return 0;
}
Example 7: verbs_runtime_query
//......... portions of this code omitted .........
    /* Allocate the protection domain for the device */
    device->ib_pd = ibv_alloc_pd(device->ib_dev_context);
    if (NULL == device->ib_pd) {
        rc = OSHMEM_ERR_RESOURCE_BUSY;
        goto out;
    }

    /* Allocate memory */
    if (!rc) {
        void *addr = NULL;
        size_t size = getpagesize();
        struct ibv_mr *ib_mr = NULL;
        uint64_t access_flag = IBV_ACCESS_LOCAL_WRITE |
                               IBV_ACCESS_REMOTE_WRITE |
                               IBV_ACCESS_REMOTE_READ;
        uint64_t exp_access_flag = 0;

        OBJ_CONSTRUCT(&device->ib_mr_array, opal_value_array_t);
        opal_value_array_init(&device->ib_mr_array, sizeof(struct ibv_mr *));

#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
        exp_access_flag = IBV_EXP_ACCESS_ALLOCATE_MR |
                          IBV_EXP_ACCESS_SHARED_MR_USER_READ |
                          IBV_EXP_ACCESS_SHARED_MR_USER_WRITE;
#endif /* MPAGE_ENABLE */

        struct ibv_exp_reg_mr_in in = {device->ib_pd, addr, size, access_flag|exp_access_flag, 0};
        ib_mr = ibv_exp_reg_mr(&in);
        if (NULL == ib_mr) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
        } else {
            device->ib_mr_shared = ib_mr;
            opal_value_array_append_item(&device->ib_mr_array, &ib_mr);
        }

#if defined(MPAGE_ENABLE) && (MPAGE_ENABLE > 0)
        if (!rc) {
            struct ibv_exp_reg_shared_mr_in in_smr;

            access_flag = IBV_ACCESS_LOCAL_WRITE |
                          IBV_ACCESS_REMOTE_WRITE |
                          IBV_ACCESS_REMOTE_READ |
                          IBV_EXP_ACCESS_NO_RDMA;

            addr = (void *)mca_sshmem_base_start_address;
            mca_sshmem_verbs_fill_shared_mr(&in_smr, device->ib_pd, device->ib_mr_shared->handle, addr, access_flag);
            ib_mr = ibv_exp_reg_shared_mr(&in_smr);
            if (NULL == ib_mr) {
                mca_sshmem_verbs_component.has_shared_mr = 0;
            } else {
                opal_value_array_append_item(&device->ib_mr_array, &ib_mr);
                mca_sshmem_verbs_component.has_shared_mr = 1;
            }
        }
#endif /* MPAGE_ENABLE */
    }

    /* all is well - rainbows and butterflies */
    if (!rc) {
        *priority = mca_sshmem_verbs_component.priority;
        *module = (mca_base_module_t *)&mca_sshmem_verbs_module.super;
    }

out:
    if (device) {
        if (opal_value_array_get_size(&device->ib_mr_array)) {
            struct ibv_mr **array;
            struct ibv_mr *ib_mr = NULL;
            array = OPAL_VALUE_ARRAY_GET_BASE(&device->ib_mr_array, struct ibv_mr *);
            while (opal_value_array_get_size(&device->ib_mr_array) > 0) {
                ib_mr = array[0];
                ibv_dereg_mr(ib_mr);
                opal_value_array_remove_item(&device->ib_mr_array, 0);
            }
            if (device->ib_mr_shared) {
                device->ib_mr_shared = NULL;
            }
            OBJ_DESTRUCT(&device->ib_mr_array);
        }
        if (device->ib_pd) {
            ibv_dealloc_pd(device->ib_pd);
            device->ib_pd = NULL;
        }
        if (device->ib_dev_context) {
            ibv_close_device(device->ib_dev_context);
            device->ib_dev_context = NULL;
        }
        if (device->ib_devs) {
            ibv_free_device_list(device->ib_devs);
            device->ib_devs = NULL;
        }
    }

    return rc;
}
Example 8: discover
//......... portions of this code omitted .........
            ret = ORTE_ERROR;
            goto cleanup;
        }

        /* Do we have enough processors on the available nodes?
         * Question: How do we get the required number of processors?
         */
        if ( (Status == NodeStatus_Ready) && (idle_processors > 0) ) {
            /* Get node name. */
            hr = pNode->get_Name(&node_name);
            if (FAILED(hr)) {
                OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                                     "ras:ccp:pNode->get_Name failed."));
                ret = ORTE_ERROR;
                goto cleanup;
            }

            /* Get node processor architecture. */
            hr = pNode->get_ProcessorArchitecture(&node_arch);
            if (FAILED(hr)) {
                OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                                     "ras:ccp:pNode->get_ProcessorArchitecture failed."));
                ret = ORTE_ERROR;
                goto cleanup;
            }

            /* Prevent duplicated nodes in the list */
            for (item = opal_list_get_first(&new_nodes);
                 opal_list_get_end(&new_nodes) != item;
                 item = opal_list_get_next(item)) {
                node = (orte_node_t*) item;
                if (0 == strcmp(node->name, (char *)node_name)) {
                    ++node->slots;
                    OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                                         "ras:ccp:allocate:discover: found -- bumped slots to %d",
                                         node->slots));
                    break;
                }
            }

            /* Did we find it? */
            if (opal_list_get_end(&new_nodes) == item) {
                /* Nope -- didn't find it, so add a new item to the list */
                OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                                     "ras:ccp:allocate:discover: not found -- added to list"));
                node = OBJ_NEW(orte_node_t);
                /* The function _dupenv_s is much safer than getenv on Windows. */
                _dupenv_s(&node->username, &len, "username");
                node->name = _com_util::ConvertBSTRToString(node_name);
                node->launch_id = nodeid;
                node->slots_inuse = 0;
                node->slots_max = 0;
                node->slots = 1;
                opal_list_append(nodelist, &node->super);
            }

            /* up the nodeid */
            nodeid++;
        }

        pNode->Release();
        VariantClear(&var);
    }

    pNodes->Release();
    if (nodeid > 0) ret = ORTE_SUCCESS;

    /* All done */
cleanup:
    if (ORTE_SUCCESS == ret) {
        OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                             "ras:ccp:allocate:discover: success"));
    } else {
        OPAL_OUTPUT_VERBOSE((1, orte_ras_base.ras_output,
                             "ras:ccp:allocate:discover: failed (rc=%d)", ret));
    }
    OBJ_DESTRUCT(&new_nodes);
    SysFreeString(node_name);
    SysFreeString(node_arch);

    /* check for timing request - get stop time and report elapsed time if so */
    if (orte_timing) {
        gettimeofday(&stop, NULL);
        opal_output(0, "ras_ccp: time to allocate is %ld usec",
                    (long int)((stop.tv_sec - start.tv_sec)*1000000 +
                               (stop.tv_usec - start.tv_usec)));
        gettimeofday(&start, NULL);
    }

    return ret;
}
Example 9: vprotocol_pessimist_event_logger_connect
int vprotocol_pessimist_event_logger_connect(int el_rank, ompi_communicator_t **el_comm)
{
    int rc;
    opal_buffer_t buffer;
    char *port;
    orte_process_name_t el_proc;
    char *hnp_uri, *rml_uri;
    orte_rml_tag_t el_tag;
    char name[MPI_MAX_PORT_NAME];
    int rank;
    vprotocol_pessimist_clock_t connect_info[2];

    snprintf(name, MPI_MAX_PORT_NAME, VPROTOCOL_EVENT_LOGGER_NAME_FMT, el_rank);
    port = ompi_pubsub.lookup(name, MPI_INFO_NULL);
    if (NULL == port) {
        return OMPI_ERR_NOT_FOUND;
    }
    V_OUTPUT_VERBOSE(45, "Found port < %s >", port);

    /* separate the string into the HNP and RML URI and tag */
    if (OMPI_SUCCESS != (rc = ompi_dpm.parse_port(port, &hnp_uri, &rml_uri, &el_tag))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    /* extract the originating proc's name */
    if (ORTE_SUCCESS != (rc = orte_rml_base_parse_uris(rml_uri, &el_proc, NULL))) {
        ORTE_ERROR_LOG(rc);
        free(rml_uri); free(hnp_uri);
        return rc;
    }
    /* make sure we can route rml messages to the destination */
    if (OMPI_SUCCESS != (rc = ompi_dpm.route_to_port(hnp_uri, &el_proc))) {
        ORTE_ERROR_LOG(rc);
        free(rml_uri); free(hnp_uri);
        return rc;
    }
    free(rml_uri); free(hnp_uri);

    /* Send an rml message to tell the remote end to wake up and jump into
     * connect/accept */
    OBJ_CONSTRUCT(&buffer, opal_buffer_t);
    rc = orte_rml.send_buffer(&el_proc, &buffer, el_tag+1, 0);
    if (ORTE_SUCCESS > rc) {
        ORTE_ERROR_LOG(rc);
        OBJ_DESTRUCT(&buffer);
        return rc;
    }
    OBJ_DESTRUCT(&buffer);

    rc = ompi_dpm.connect_accept(MPI_COMM_SELF, 0, port, true, el_comm);
    if (OMPI_SUCCESS != rc) {
        ORTE_ERROR_LOG(rc);
    }

    /* Send Rank, receive max buffer size and max_clock back */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    rc = mca_pml_v.host_pml.pml_send(&rank, 1, MPI_INTEGER, 0,
                                     VPROTOCOL_PESSIMIST_EVENTLOG_NEW_CLIENT_CMD,
                                     MCA_PML_BASE_SEND_STANDARD,
                                     mca_vprotocol_pessimist.el_comm);
    if (OPAL_UNLIKELY(MPI_SUCCESS != rc))
        OMPI_ERRHANDLER_INVOKE(mca_vprotocol_pessimist.el_comm, rc,
                               __FILE__ ": failed sending event logger handshake");
    rc = mca_pml_v.host_pml.pml_recv(&connect_info, 2, MPI_UNSIGNED_LONG_LONG,
                                     0, VPROTOCOL_PESSIMIST_EVENTLOG_NEW_CLIENT_CMD,
                                     mca_vprotocol_pessimist.el_comm, MPI_STATUS_IGNORE);
    if (OPAL_UNLIKELY(MPI_SUCCESS != rc))
        OMPI_ERRHANDLER_INVOKE(mca_vprotocol_pessimist.el_comm, rc,
                               __FILE__ ": failed receiving event logger handshake");
    return rc;
}
Example 10: ofacm_base_proc_destructor
static void ofacm_base_proc_destructor (opal_common_ofacm_base_proc_t *proc)
{
    OBJ_DESTRUCT(&proc->all_contexts);
}
Example 11: main
int
main(int argc, char *argv[])
{
    orcm_alloc_t alloc, *aptr;
    orte_rml_recv_cb_t xfer;
    opal_buffer_t *buf;
    int rc, n;
    orcm_scd_cmd_flag_t command = ORCM_SESSION_REQ_COMMAND;
    orcm_alloc_id_t id;
    struct timeval tv;

    /* initialize, parse command line, and setup frameworks */
    orcm_osub_init(argc, argv);

    /* create an allocation request */
    OBJ_CONSTRUCT(&alloc, orcm_alloc_t);

    alloc.priority = 1;                                // session priority
    alloc.account = orcm_osub_globals.account;         // account to be charged
    alloc.name = orcm_osub_globals.name;               // user-assigned project name
    alloc.gid = orcm_osub_globals.gid;                 // group id to be run under
    alloc.max_nodes = orcm_osub_globals.max_nodes;     // max number of nodes
    alloc.max_pes = orcm_osub_globals.max_pes;         // max number of processing elements
    alloc.min_nodes = orcm_osub_globals.min_nodes;     // min number of nodes required
    alloc.min_pes = orcm_osub_globals.min_pes;         // min number of pe's required
    alloc.exclusive = orcm_osub_globals.exclusive;     // true if nodes to be exclusively allocated (i.e., not shared across sessions)
    alloc.interactive = orcm_osub_globals.interactive; // true if in interactive mode
    alloc.nodes = '\0';                                // regex of nodes to be used
    alloc.parent_name = ORTE_NAME_PRINT(ORTE_PROC_MY_NAME); // my_daemon_name
    alloc.parent_uri = '\0';                           // my_daemon uri address
    /* alloc.constraints = orcm_osub_globals.resources */ ; // list of resource constraints to be applied when selecting hosts
    alloc.hnpname = '\0';                              // my hnp name
    alloc.hnpuri = '\0';                               // my hnp uri
    alloc.caller_uid = getuid();                       // caller uid, not from args
    alloc.caller_gid = getgid();                       // caller gid, not from args

    if (NULL == orcm_osub_globals.starttime || 0 == strlen(orcm_osub_globals.starttime)) {
        gettimeofday(&tv, NULL);
        /* desired start time for allocation defaults to now */
        alloc.begin = tv.tv_sec;
    } else {
        /* TODO: eventually parse the string to figure out what user means, for now its now */
        gettimeofday(&tv, NULL);
        alloc.begin = tv.tv_sec;
    }

    if (NULL == orcm_osub_globals.walltime || 0 == strlen(orcm_osub_globals.walltime)) {
        /* desired walltime defaults to 10 min */
        alloc.walltime = 600;
    } else {
        /* get this in seconds for now, but will be parsed for more complexity later */
        alloc.walltime = (time_t)strtol(orcm_osub_globals.walltime, NULL, 10); // max execution time
    }

    /* setup to receive the result */
    OBJ_CONSTRUCT(&xfer, orte_rml_recv_cb_t);
    xfer.active = true;
    orte_rml.recv_buffer_nb(ORTE_NAME_WILDCARD,
                            ORCM_RML_TAG_SCD,
                            ORTE_RML_NON_PERSISTENT,
                            orte_rml_recv_callback, &xfer);

    /* send it to the scheduler */
    buf = OBJ_NEW(opal_buffer_t);
    /* pack the alloc command flag */
    if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &command, 1, ORCM_SCD_CMD_T))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    aptr = &alloc;
    if (OPAL_SUCCESS != (rc = opal_dss.pack(buf, &aptr, 1, ORCM_ALLOC))) {
        ORTE_ERROR_LOG(rc);
        return rc;
    }
    if (ORTE_SUCCESS != (rc = orte_rml.send_buffer_nb(ORTE_PROC_MY_SCHEDULER, buf,
                                                      ORCM_RML_TAG_SCD,
                                                      orte_rml_send_callback, NULL))) {
        ORTE_ERROR_LOG(rc);
        OBJ_RELEASE(buf);
        OBJ_DESTRUCT(&xfer);
        return rc;
    }

    /* get our allocated jobid */
    n = 1;
    ORTE_WAIT_FOR_COMPLETION(xfer.active);
    if (OPAL_SUCCESS != (rc = opal_dss.unpack(&xfer.data, &id, &n, ORCM_ALLOC_ID_T))) {
        ORTE_ERROR_LOG(rc);
        OBJ_DESTRUCT(&xfer);
        return rc;
    }

    opal_output(0, "RECEIVED ALLOC ID %d", (int)id);

    if (ORTE_SUCCESS != orcm_finalize()) {
        fprintf(stderr, "Failed orcm_finalize\n");
        exit(1);
    }
//......... portions of this code omitted .........
Example 12: mca_fcoll_base_file_select
//......... portions of this code omitted .........
        if (NULL == component->fcollm_file_query) {
            opal_output_verbose(10, mca_fcoll_base_output,
                                "select: no query, ignoring the component");
        } else {
            /*
             * call the query function and see what it returns
             */
            module = component->fcollm_file_query (file, &priority);

            if (NULL == module ||
                NULL == module->fcoll_module_init) {
                /*
                 * query did not return any action which can be used
                 */
                opal_output_verbose(10, mca_fcoll_base_output,
                                    "select: query returned failure");
            } else {
                opal_output_verbose(10, mca_fcoll_base_output,
                                    "select: query returned priority %d",
                                    priority);
                /*
                 * is this the best component we have found till now?
                 */
                if (priority > best_priority) {
                    best_priority = priority;
                    best_component = component;
                }

                om = OBJ_NEW(queried_module_t);
                /*
                 * check if we have run out of space
                 */
                if (NULL == om) {
                    OBJ_DESTRUCT(&queried);
                    return OMPI_ERR_OUT_OF_RESOURCE;
                }
                om->om_component = component;
                om->om_module = module;
                opal_list_append(&queried, (opal_list_item_t *)om);
            } /* end else of if (NULL == module) */
        } /* end else of if (NULL == component->fcollm_init) */
    } /* end for ... end of traversal */

    /* We have to empty out the selectable list if the selectable
     * list was constructed as a duplicate and not as a pointer to the
     * mca_base_components_available list. So, check and destroy */
    if (was_selectable_constructed) {
        /* remove all the items first */
        for (item = opal_list_get_first(&mca_fcoll_base_components_available);
             item != opal_list_get_end(&mca_fcoll_base_components_available);
             item = next_item) {
            next_item = opal_list_get_next(item);
            OBJ_RELEASE (item);
        }
        /* release the list itself */
        OBJ_RELEASE (selectable);
        was_selectable_constructed = false;
    }

    /*
     * Now we have a list of components which successfully returned
     * their module struct. One of these components has the best
     * priority. The rest have to be comm_unqueried to counter the
//......... portions of this code omitted .........
int orte_ess_base_app_setup(bool db_restrict_local)
{
int ret;
char *error = NULL;
opal_value_t kv;
/*
* stdout/stderr buffering
* If the user requested to override the default setting then do
* as they wish.
*/
if( orte_ess_base_std_buffering > -1 ) {
if( 0 == orte_ess_base_std_buffering ) {
setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0);
}
else if( 1 == orte_ess_base_std_buffering ) {
setvbuf(stdout, NULL, _IOLBF, 0);
setvbuf(stderr, NULL, _IOLBF, 0);
}
else if( 2 == orte_ess_base_std_buffering ) {
setvbuf(stdout, NULL, _IOFBF, 0);
setvbuf(stderr, NULL, _IOFBF, 0);
}
}
/* if I am an MPI app, we will let the MPI layer define and
* control the opal_proc_t structure. Otherwise, we need to
* do so here */
if (ORTE_PROC_NON_MPI) {
orte_process_info.super.proc_name = *(opal_process_name_t*)ORTE_PROC_MY_NAME;
orte_process_info.super.proc_hostname = strdup(orte_process_info.nodename);
orte_process_info.super.proc_flags = OPAL_PROC_ALL_LOCAL;
orte_process_info.super.proc_arch = opal_local_arch;
opal_proc_local_set(&orte_process_info.super);
}
/* get an async event base - we use the opal_async one so
* we don't startup extra threads if not needed */
orte_event_base = opal_progress_thread_init(NULL);
progress_thread_running = true;
/* open and setup the state machine */
if (ORTE_SUCCESS != (ret = mca_base_framework_open(&orte_state_base_framework, 0))) {
ORTE_ERROR_LOG(ret);
error = "orte_state_base_open";
goto error;
}
if (ORTE_SUCCESS != (ret = orte_state_base_select())) {
ORTE_ERROR_LOG(ret);
error = "orte_state_base_select";
goto error;
}
/* open the errmgr */
if (ORTE_SUCCESS != (ret = mca_base_framework_open(&orte_errmgr_base_framework, 0))) {
ORTE_ERROR_LOG(ret);
error = "orte_errmgr_base_open";
goto error;
}
/* setup my session directory */
if (orte_create_session_dirs) {
OPAL_OUTPUT_VERBOSE((2, orte_ess_base_framework.framework_output,
"%s setting up session dir with\n\ttmpdir: %s\n\thost %s",
ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
(NULL == orte_process_info.tmpdir_base) ? "UNDEF" : orte_process_info.tmpdir_base,
orte_process_info.nodename));
if (ORTE_SUCCESS != (ret = orte_session_dir(true,
orte_process_info.tmpdir_base,
orte_process_info.nodename, NULL,
ORTE_PROC_MY_NAME))) {
ORTE_ERROR_LOG(ret);
error = "orte_session_dir";
goto error;
}
/* Once the session directory location has been established, set
the opal_output env file location to be in the
proc-specific session directory. */
opal_output_set_output_file_info(orte_process_info.proc_session_dir,
"output-", NULL, NULL);
/* store the session directory location */
OBJ_CONSTRUCT(&kv, opal_value_t);
kv.key = strdup(OPAL_PMIX_NSDIR);
kv.type = OPAL_STRING;
kv.data.string = strdup(orte_process_info.job_session_dir);
if (OPAL_SUCCESS != (ret = opal_pmix.store_local(ORTE_PROC_MY_NAME, &kv))) {
ORTE_ERROR_LOG(ret);
OBJ_DESTRUCT(&kv);
error = "opal pmix put job sessiondir";
goto error;
}
OBJ_DESTRUCT(&kv);
OBJ_CONSTRUCT(&kv, opal_value_t);
kv.key = strdup(OPAL_PMIX_PROCDIR);
kv.type = OPAL_STRING;
kv.data.string = strdup(orte_process_info.proc_session_dir);
if (OPAL_SUCCESS != (ret = opal_pmix.store_local(ORTE_PROC_MY_NAME, &kv))) {
ORTE_ERROR_LOG(ret);
OBJ_DESTRUCT(&kv);
error = "opal pmix put proc sessiondir";
//......... portions of this code omitted .........
Example 14: orte_rds_hostfile_query
//......... portions of this code omitted .........
                }
#endif
                local_cellid = 0;
                need_cellid = false;
            }

            rds_item->cellid      = local_cellid;
            ras_item->node_cellid = local_cellid;

            new_attr = OBJ_NEW(orte_rds_cell_attr_t);
            if (NULL == new_attr) {
                ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
                return ORTE_ERR_OUT_OF_RESOURCE;
            }
            new_attr->keyval.key = strdup(ORTE_RDS_NAME);
            new_attr->keyval.value = OBJ_NEW(orte_data_value_t);
            if (NULL == new_attr->keyval.value) {
                ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
                return ORTE_ERR_OUT_OF_RESOURCE;
            }
            new_attr->keyval.value->type = ORTE_STRING;
            new_attr->keyval.value->data = strdup(ras_item->node_name);
            opal_list_append(&(rds_item->attributes), &new_attr->super);

            new_attr = OBJ_NEW(orte_rds_cell_attr_t);
            if (NULL == new_attr) {
                ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
                return ORTE_ERR_OUT_OF_RESOURCE;
            }
            new_attr->keyval.key = strdup(ORTE_CELLID_KEY);
            new_attr->keyval.value = OBJ_NEW(orte_data_value_t);
            if (NULL == new_attr->keyval.value) {
                ORTE_ERROR_LOG(ORTE_ERR_OUT_OF_RESOURCE);
                return ORTE_ERR_OUT_OF_RESOURCE;
            }
            new_attr->keyval.value->type = ORTE_CELLID;
            if (ORTE_SUCCESS != (rc = orte_dss.copy(&(new_attr->keyval.value->data), &(rds_item->cellid), ORTE_CELLID))) {
                ORTE_ERROR_LOG(rc);
                return rc;
            }
            opal_list_append(&(rds_item->attributes), &new_attr->super);

            opal_list_append(&rds_updates, &rds_item->super);
        }

        /* Insert the new node into the RDS */
        rc = orte_rds.store_resource(&rds_updates);
        if (ORTE_SUCCESS != rc) {
            goto cleanup;
        }

        /* Then the RAS, since we can assume that any
         * resources listed in the hostfile have been
         * already allocated for our use.
         */
        rc = orte_ras_base_node_insert(&updates);
        if (ORTE_SUCCESS != rc) {
            goto cleanup;
        }

        /* and now, indicate that ORTE should override any oversubscribed conditions
         * based on local hardware limits since the user (a) might not have
         * provided us any info on the #slots for a node, and (b) the user
         * might have been wrong! If we don't check the number of local physical
         * processors, then we could be too aggressive on our sched_yield setting
         * and cause performance problems.
         */
        rc = orte_ras_base_set_oversubscribe_override(job);
        if (ORTE_SUCCESS != rc) {
            goto cleanup;
        }
    }

cleanup:
    if (NULL != mca_rds_hostfile_component.path) {
        free(mca_rds_hostfile_component.path);
        mca_rds_hostfile_component.path = NULL;
    }

    while (NULL != (item = opal_list_remove_first(&existing))) {
        OBJ_RELEASE(item);
    }
    while (NULL != (item = opal_list_remove_first(&updates))) {
        OBJ_RELEASE(item);
    }
    while (NULL != (rds_item = (orte_rds_cell_desc_t*)opal_list_remove_first(&rds_updates))) {
        while (NULL != (new_attr = (orte_rds_cell_attr_t*)opal_list_remove_first(&(rds_item->attributes)))) {
            OBJ_RELEASE(new_attr);
        }
        OBJ_RELEASE(rds_item);
    }

    OBJ_DESTRUCT(&existing);
    OBJ_DESTRUCT(&updates);
    OBJ_DESTRUCT(&rds_updates);

    return rc;
}
Example 15: ompi_osc_rdma_replyreq_destruct
static void ompi_osc_rdma_replyreq_destruct(ompi_osc_rdma_replyreq_t *replyreq)
{
    OBJ_DESTRUCT(&(replyreq->rep_target_convertor));
}