本文整理汇总了C++中Endpoint::getAddr方法的典型用法代码示例。如果您正苦于以下问题:C++ Endpoint::getAddr方法的具体用法?C++ Endpoint::getAddr怎么用?C++ Endpoint::getAddr使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Endpoint
的用法示例。
在下文中一共展示了Endpoint::getAddr方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ThreadPerConnection
bool P3ReplicationGroup::prebindDataToReplica(OverlayPeerInfoPtr& replica) {
    // Pre-binds a fault-tolerance (FT) data connection to the given replica.
    // The connection is serviced by a dedicated thread-per-connection (TPC)
    // execution model, optionally backed by an "HRT" CPU reservation when a
    // QoS manager is available.
    // Returns true on success, false on any failure.
    ACE_GUARD_RETURN(ACE_SYNCH_RECURSIVE_MUTEX, mon, m_lock, false);
    ACE_Connector<FTDataClientHandler, ACE_SOCK_Connector> connector;
    Endpoint endpoint;
    replica->getFTDataSAPInfo()->getFirstEndpoint(endpoint);
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::prebindDataToReplica - SAP=%s\n"), endpoint.toString().c_str()));
    ThreadPerConnection *tpc = new ThreadPerConnection();
    // Wrapper smart pointer handed to the handler (handler presumably takes
    // ownership of tpcPrt -- TODO confirm against FTDataClientHandler).
    ExecutionModelPtr* tpcPrt = new ExecutionModelPtr(tpc);
    FTDataClientHandler* ftDataClient = new FTDataClientHandler(this,
            replica->getUUID(),
            replica->getFID(),
            this->getGroupUUID(),
            false, false, tpcPrt, 0, 0, 0);
    CPUReservation* reserve = 0;
    CPUQoS* cpuQoS = new CPUPriorityQoS(CPUQoS::SCHEDULE_RT_DEFAULT, CPUQoS::MAX_RT_PRIO);
    if (this->m_ft->getQoSManager() != 0) {
        reserve = m_ft->getQoSManager()->createCPUReservation("HRT", cpuQoS);
    }
    // Bind the handler to the TPC before opening it, then route connector
    // events through the TPC's private reactor.
    tpc->bind(ftDataClient);
    tpc->open(reserve, cpuQoS);
    connector.reactor(tpc->getResources()->getReactor());
    if (connector.connect(ftDataClient, endpoint.getAddr()) == -1) {
        ACE_ERROR((LM_ERROR, ACE_TEXT("(%T)%@\n"),
                ACE_TEXT("(%T)ERROR: P3ReplicationGroup::prebindDataToReplica - connect failed:")));
        ftDataClient->close();
        delete ftDataClient;
        return false;
    } else {
        if (m_debugP3ReplicationGroup) {
            ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::prebindDataToReplica - Connect Data to Replica - OK! UUID=%s\n"),
                    replica->getUUID()->toString().c_str()));
        }
    }
    ftDataClient->preBindSession();
    if (m_clientDataManager.add(ftDataClient) == -1) {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::prebindDataToReplica - failed to add to group UUID=%s\n"),
                replica->getUUID()->toString().c_str()));
        // BUGFIX: this failure path previously returned without closing the
        // already-connected handler, leaking the connection; close it here,
        // matching the asynchronous() failure path below.
        ftDataClient->close();
        return false;
    }
    if (ftDataClient->asynchronous(true, false) == -1) {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::prebindDataToReplica - failed to add to async UUID=%s\n"),
                replica->getUUID()->toString().c_str()));
        ftDataClient->close();
        return false;
    }
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::prebindDataToReplica - OK! UUID=%s\n"),
            replica->getUUID()->toString().c_str()));
    return true;
}
示例2: ServiceException
void P3LeafMesh::createRemoteService(const SAPInfo* hint, const UUIDPtr& uuid, const UUIDPtr& sid, ServiceParamsPtr& params, UUIDPtr& iid) throw (ServiceException&) {
    // Connects to the remote peer described by "hint" and asks it to create
    // a service instance; the new instance id is returned through "iid".
    // Throws ServiceException when no hint is supplied, when the connection
    // cannot be established, or when the peer lacks resources.
    ACE_GUARD(ACE_SYNCH_RECURSIVE_MUTEX, ace_mon, m_lock);
    if (hint == 0) {
        throw ServiceException(ServiceException::SERVICE_WITHOUT_IMPL);
    }
    Endpoint endpoint;
    ACE_Connector<P3MeshClientHandler, ACE_SOCK_Connector> connector;
    hint->getFirstEndpoint(endpoint);
    QoSEndpoint qosE = *(endpoint.getQoS());
    UUIDPtr runtimeUUID;
    getUUID(runtimeUUID);
    UUIDPtr fid;
    getFID(fid);
    P3MeshClientHandler* clientHandler = new P3MeshClientHandler(
            runtimeUUID,
            fid,
            qosE,
            false, false, 0, 0, 0, 0);
    if (connector.connect(clientHandler, endpoint.getAddr()) == -1) {
        ACE_ERROR((LM_ERROR, ACE_TEXT("(%T)%@\n"),
                ACE_TEXT("(%T)ERROR: P3Mesh::createRemoteService - connect failed:")));
        clientHandler->close();
        // BUGFIX: the original nulled the pointer BEFORE delete (leaking the
        // handler) and then fell through to createService() on a null
        // pointer. Delete in the correct order and abort with an exception
        // instead of dereferencing null.
        delete clientHandler;
        throw ServiceException(ServiceException::INSUFFICIENT_RESOURCES);
    } else {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - Connect OK!\n")));
    }
    int ret = clientHandler->createService(params, iid);
    clientHandler->close();
    delete clientHandler;
    if (ret == -1) {
        ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - failed to create, not enough resources\n")));
        throw ServiceException(ServiceException::INSUFFICIENT_RESOURCES);
    }
    ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3LeafMesh::createRemoteService - service created!\n")));
}
示例3: bindClientHandler
// Locates a running instance of the actuator service via a discovery query,
// then connects m_clientHandler to the endpoint matching the client's QoS,
// running the connection on its own thread-per-connection execution model.
// Returns 0 on success, -1 on any failure (query failed/exception, connect
// failed, or switching the handler to asynchronous mode failed).
int ActuatorServiceClient::bindClientHandler() {
ACE_GUARD_RETURN(ACE_SYNCH_RECURSIVE_MUTEX, ace_mon, m_lock, -1);
// Query the overlay discovery service for an instance of (m_sid, m_iid).
// NOTE(review): query appears to be owned by executeQuery (see the
// "query is already owned" comment in the sibling RPC client) -- confirm.
FindServiceInstanceQuery* query = new FindServiceInstanceQuery(m_sid, m_iid);
Stheno* runtime = this->getRuntime();
OverlayInterface* oi = runtime->getOverlayInterface();
DiscoveryInterfacePtr discoveryPtr = oi->getDiscovery();
printf("ActuatorServiceClient::bindClientHandler() OI=%p\n", oi);
DiscoveryQueryReply* queryReply_ = discoveryPtr->executeQuery(query);
if (queryReply_ == 0) {
return -1;
}
if (queryReply_->isException()) {
//printf("ActuatorServiceClient: exception! SID(%s) IID(%s)\n", m_sid->toString().c_str(), m_iid->toString().c_str());
delete queryReply_;
return -1;
}
// NOTE(review): queryReply (and queryReply_ on the success path) is never
// deleted in this function -- apparent leak; the RPC sibling deletes it on
// its error path. Verify FindServiceInstanceQueryReply's ownership rules.
FindServiceInstanceQueryReply *queryReply = new FindServiceInstanceQueryReply(queryReply_);
//printf("StreamClient: FindServiceInstanceQueryReply!\n",queryReply->
//EndpointPtr endpoint = m_streamParams->getEndpoint();
Endpoint endpoint;
//queryReply->getSAPInfo()->getFirstEndpoint(endpoint);
// Pick the endpoint that matches this client's QoS requirements.
queryReply->getSAPInfo()->getEndpoint(m_cp->getQoS(), endpoint);
printf("ActuatorServiceClient: FindServiceInstanceQueryReply! endpoint=%s\n", endpoint.toString().c_str());
ACE_Connector<ActuatorServiceClientHandler, ACE_SOCK_Connector> connector;
//QoSEndpoint qosE = *(endpoint.getQoS());
// Dedicated thread-per-connection execution model for this handler; the
// wrapper smart pointer is passed into the handler's constructor.
ThreadPerConnection *tpc = new ThreadPerConnection();
//ACE_Strong_Bound_Ptr<ThreadPerConnection, ACE_Recursive_Thread_Mutex>* tpcPrt = new
//ACE_Strong_Bound_Ptr<ThreadPerConnection, ACE_Recursive_Thread_Mutex > (tpc);
ExecutionModelPtr* tpcPrt = new
ExecutionModelPtr(tpc);
//QoSEndpoint qosEndpoint(Endpoint::TCP, 50);
UUIDPtr runtimeUUID;
UUIDPtr fid;
m_runtime->getUUID(runtimeUUID);
m_runtime->getOverlayInterface()->getMesh()->getFID(fid);
m_clientHandler = new ActuatorServiceClientHandler(
runtimeUUID,
fid,
false, false, tpcPrt, 0, 0, 0);
printf("ActuatorServiceClient: before tpc\n");
//tpc->open();
CPUReservation* reserve = 0;
//CPUQoS* cpuQoS = new CPUPriorityQoS(CPUQoS::SCHEDULE_FIFO, CPUQoS::MAX_RT_PRIO);
// CPU QoS comes from the client's connection parameters.
CPUQoS* cpuQoS = m_cp->createCPUQoS(); //new CPUPriorityQoS(m_cp->getScheduleType(), m_cp->getPriority());
// Bind handler to the TPC before opening, then route connector events
// through the TPC's private reactor so the connection runs on its thread.
tpc->bind(m_clientHandler);
tpc->open(reserve, cpuQoS);
//tpc->bind(m_clientHandler);
connector.reactor(tpc->getResources()->getReactor());
printf("ActuatorServiceClient: before connect\n");
if (connector.connect(m_clientHandler, endpoint.getAddr()) == -1) {
ACE_ERROR((LM_ERROR, ACE_TEXT("(%T)%@\n"),
ACE_TEXT("(%T)ERROR: ActuatorServiceClient::executeQuery() - connect failed:")));
m_clientHandler->close();
delete m_clientHandler;
m_clientHandler = 0;
return -1;
} else {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: ActuatorServiceClient::open_i() - Connect to service! %@\n"), m_clientHandler));
// Accumulate rebind latency when a rebind was in progress (m_rebindStart
// non-zero means a previous connection loss started the clock).
if (m_rebindStart.msec() != 0) {
ACE_Time_Value rebindEnd = ACE_OS::gettimeofday() - m_rebindStart;
m_rebindTotalTime += rebindEnd;
}
}
printf("ActuatorServiceClient: after connect\n");
//m_client->setCloseListener(this);
/*if (m_clientHandler->setCloseListener(this) == -1) {
m_clientHandler->close();
delete m_clientHandler;
m_clientHandler = 0;
return -1;
}*/
//if (m_clientHandler->asynchronous(true, false) != 0) {
// Switch the handler to asynchronous (reactive) mode; tear down on failure.
if (m_clientHandler->asynchronous(true, false) != 0) {
m_clientHandler->close();
delete m_clientHandler;
m_clientHandler = 0;
return -1;
}
return 0;
}
示例4: bindClientHandler
int RPCServiceClient::bindClientHandler() {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler()\n")));
ACE_GUARD_RETURN(ACE_SYNCH_RECURSIVE_MUTEX, ace_mon, m_lock, -1);
FindServiceInstanceQuery* query = new FindServiceInstanceQuery(m_sid, m_iid);
Stheno* runtime = this->getRuntime();
OverlayInterface* oi = runtime->getOverlayInterface();
DiscoveryInterfacePtr discoveryPtr = oi->getDiscovery();
//printf("RPCServiceClient::bindClientHandler() OI=%p\n", oi);
DiscoveryQueryReply* queryReply_ = discoveryPtr->executeQuery(query);
//query is already owned
/*ACE_DEBUG((LM_DEBUG, ACE_TEXT("********************RPCServiceClient::bindClientHandler %s %s %p\n"),
m_sid->toString().c_str(),
m_iid->toString().c_str(),
queryReply_));*/
if (queryReply_ == 0) {
printf("query == bnull!\n");
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler() query null!\n")));
return -1;
}
if (queryReply_->isException()) {
printf("query == ex!\n");
delete queryReply_;
return -1;
}
FindServiceInstanceQueryReply *queryReply = new FindServiceInstanceQueryReply(queryReply_);
Endpoint endpoint;
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler() before endpoint choosing\n")));
try {
queryReply->getSAPInfo()->getEndpoint(m_cp->getQoS(), endpoint);
} catch (EndpointException& ex) {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler() endpoint EX!\n")));
delete queryReply;
return -1;
}
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: bindClientHandler! endpoint=%s\n"), endpoint.toString().c_str()));
ACE_Connector<RPCServiceClientHandler, ACE_SOCK_Connector> connector;
//QoSEndpoint qosE = *(endpoint.getQoS());
ThreadPerConnection *tpc = new ThreadPerConnection();
//ACE_Strong_Bound_Ptr<ThreadPerConnection, ACE_Recursive_Thread_Mutex>* tpcPrt = new
// ACE_Strong_Bound_Ptr<ThreadPerConnection, ACE_Recursive_Thread_Mutex > (tpc);
ExecutionModelPtr* tpcPrt = new
ExecutionModelPtr(tpc);
//QoSEndpoint qosEndpoint(Endpoint::TCP, 50);
if (m_clientHandler != 0) {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler() closing open client\n")));
m_clientHandler->close();
delete m_clientHandler;
}
UUIDPtr runtimeUUID;
UUIDPtr fid;
m_runtime->getUUID(runtimeUUID);
m_runtime->getOverlayInterface()->getMesh()->getFID(fid);
m_clientHandler = new RPCServiceClientHandler(
runtimeUUID,
fid,
m_cp->getQoS(),
false, false, tpcPrt, 0, 0, 0);
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: RPCServiceClient::bindClientHandler() endpoint client=%@\n"), m_clientHandler));
//tpc->open();
CPUReservation* reserve = 0;
CPUQoS* cpuQoS = m_cp->createCPUQoS();
//CPUQoS* cpuQoS = new CPUPriorityQoS(CPUQoS::SCHEDULE_FIFO, CPUQoS::MAX_RT_PRIO);
//CPUQoS* cpuQoS = new CPUPriorityQoS(m_cp->getScheduleType(), m_cp->getPriority());
tpc->bind(m_clientHandler);
tpc->open(reserve, cpuQoS);
//tpc->bind(m_clientHandler);
connector.reactor(tpc->getResources()->getReactor());
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T) RPCServiceClient: bindClientHandler before connect\n")));
if (connector.connect(m_clientHandler, endpoint.getAddr()) == -1) {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%T)(%t|%T) ERROR: RPCServiceClient::bindClientHandler() - connect failed!")));
m_clientHandler->close();
delete m_clientHandler;
m_clientHandler = 0;
return -1;
} else {
ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: RPCServiceClient::bindClientHandler() - Connect to service! client=%@\n"), m_clientHandler));
if (m_rebindStart.msec() != 0) {
ACE_Time_Value rebindEnd = ACE_OS::gettimeofday() - m_rebindStart;
m_rebindTotalTime += rebindEnd;
m_rebindStart = ACE_Time_Value(0,0);
}
}
/*if (m_clientHandler->asynchronous(true, false) != 0) {
m_clientHandler->close();
delete m_clientHandler;
m_clientHandler = 0;
return -1;
}*/
/*if (m_rebindStart.msec() != 0) {
ACE_Time_Value rebindEnd = ACE_OS::gettimeofday() - m_rebindStart;
m_rebindTotalTime += rebindEnd;
//.........这里部分代码省略.........