本文整理汇总了C++中Ice::EndpointSeq::push_back方法的典型用法代码示例。如果您正苦于以下问题:C++ EndpointSeq::push_back方法的具体用法?C++ EndpointSeq::push_back怎么用?C++ EndpointSeq::push_back使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Ice::EndpointSeq
的用法示例。
在下文中一共展示了EndpointSeq::push_back方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: WellKnownObjectsManager::getWellKnownObjectReplicatedProxy
Ice::ObjectPrx
WellKnownObjectsManager::getWellKnownObjectReplicatedProxy(const Ice::Identity& id, const string& endpt)
{
    try
    {
        Ice::ObjectPrx proxy = _database->getObjectProxy(id);

        //
        // Re-order the endpoints of the registered proxy so that the
        // endpoints of this registry replica come first, followed by
        // every proxy endpoint not already present in that list.
        //
        Ice::EndpointSeq replicaEndpoints = getEndpoints(endpt)->ice_getEndpoints();
        Ice::EndpointSeq proxyEndpoints = proxy->ice_getEndpoints();
        Ice::EndpointSeq reordered = replicaEndpoints;
        for(Ice::EndpointSeq::const_iterator q = proxyEndpoints.begin(); q != proxyEndpoints.end(); ++q)
        {
            const bool duplicate =
                find(replicaEndpoints.begin(), replicaEndpoints.end(), *q) != replicaEndpoints.end();
            if(!duplicate)
            {
                reordered.push_back(*q);
            }
        }
        return proxy->ice_endpoints(reordered);
    }
    catch(const ObjectNotRegisteredException&)
    {
        //
        // If for some reason the object isn't registered, compute the
        // endpoints from the replica cache instead. For slaves this
        // only returns the slave endpoints.
        //
        return _database->getReplicaCache().getEndpoints(endpt, getEndpoints(endpt))->ice_identity(id);
    }
}
示例2: ZEND_METHOD(Ice_ObjectPrx, ice_endpoints)
// PHP-level ice_endpoints() method on Ice_ObjectPrx: takes one PHP array of
// Ice::Endpoint wrapper objects and returns a clone of this proxy configured
// with exactly those endpoints, or NULL on any parse/conversion error.
ZEND_METHOD(Ice_ObjectPrx, ice_endpoints)
{
// Recover the C++ proxy wrapped by the PHP object being invoked.
ProxyPtr _this = Wrapper<ProxyPtr>::value(getThis() TSRMLS_CC);
assert(_this);
zval* zv;
// "a" = exactly one argument, which must be a PHP array.
if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, const_cast<char*>("a"), &zv) == FAILURE)
{
RETURN_NULL();
}
// Convert each array element into an Ice::EndpointPtr, walking the hash
// table with an explicit position so the array's internal pointer is
// untouched.
Ice::EndpointSeq seq;
HashTable* arr = Z_ARRVAL_P(zv);
HashPosition pos;
void* data;
zend_hash_internal_pointer_reset_ex(arr, &pos);
while(zend_hash_get_current_data_ex(arr, &data, &pos) != FAILURE)
{
zval** val = reinterpret_cast<zval**>(data);
// Only object elements can wrap an endpoint; anything else is an error.
if(Z_TYPE_PP(val) != IS_OBJECT)
{
runtimeError("expected an element of type Ice::Endpoint" TSRMLS_CC);
RETURN_NULL();
}
// fetchEndpoint reports its own error on failure, so just bail out.
Ice::EndpointPtr endpoint;
if(!fetchEndpoint(*val, endpoint TSRMLS_CC))
{
RETURN_NULL();
}
seq.push_back(endpoint);
zend_hash_move_forward_ex(arr, &pos);
}
// Clone the proxy with the new endpoint list; Ice exceptions are mapped to
// their PHP equivalents and NULL is returned.
try
{
if(!_this->clone(return_value, _this->proxy->ice_endpoints(seq) TSRMLS_CC))
{
RETURN_NULL();
}
}
catch(const IceUtil::Exception& ex)
{
throwException(ex TSRMLS_CC);
RETURN_NULL();
}
}
示例3: NodeSessionManager::create
void
NodeSessionManager::create(const NodeIPtr& node)
{
{
Lock sync(*this);
assert(!_node);
const_cast<NodeIPtr&>(_node) = node;
Ice::CommunicatorPtr communicator = _node->getCommunicator();
assert(communicator->getDefaultLocator());
Ice::Identity id = communicator->getDefaultLocator()->ice_getIdentity();
//
// Initialize the IceGrid::Query objects. The IceGrid::Query
// interface is used to lookup the registry proxy in case it
// becomes unavailable. Since replicas might not always have
// an up to date registry proxy, we need to query all the
// replicas.
//
Ice::EndpointSeq endpoints = communicator->getDefaultLocator()->ice_getEndpoints();
id.name = "Query";
QueryPrx query = QueryPrx::uncheckedCast(communicator->stringToProxy(communicator->identityToString(id)));
for(Ice::EndpointSeq::const_iterator p = endpoints.begin(); p != endpoints.end(); ++p)
{
Ice::EndpointSeq singleEndpoint;
singleEndpoint.push_back(*p);
_queryObjects.push_back(QueryPrx::uncheckedCast(query->ice_endpoints(singleEndpoint)));
}
id.name = "InternalRegistry-Master";
_master = InternalRegistryPrx::uncheckedCast(communicator->stringToProxy(communicator->identityToString(id)));
_thread = new Thread(*this);
_thread->start();
}
//
// Try to create the session. It's important that we wait for the
// creation of the session as this will also try to create sessions
// with replicas (see createdSession below) and this must be done
// before the node is activated.
//
_thread->tryCreateSession(true, IceUtil::Time::seconds(3));
}
示例4: run
int
run(int argc, char* argv[], const CommunicatorPtr& communicator)
{
    //
    // Parse command-line options; only "--cycle" is recognized.
    //
    IceUtilInternal::Options opts;
    opts.addOpt("", "cycle");
    try
    {
        opts.parse(argc, (const char**)argv);
    }
    catch(const IceUtilInternal::BadOptException& e)
    {
        cerr << argv[0] << ": " << e.reason << endl;
        return EXIT_FAILURE;
    }

    //
    // The topic manager proxy must be configured.
    //
    PropertiesPtr properties = communicator->getProperties();
    const char* managerProxyProperty = "IceStormAdmin.TopicManager.Default";
    string managerProxy = properties->getProperty(managerProxyProperty);
    if(managerProxy.empty())
    {
        cerr << argv[0] << ": property `" << managerProxyProperty << "' is not set" << endl;
        return EXIT_FAILURE;
    }

    IceStorm::TopicManagerPrx manager =
        IceStorm::TopicManagerPrx::checkedCast(communicator->stringToProxy(managerProxy));
    if(!manager)
    {
        cerr << argv[0] << ": `" << managerProxy << "' is not running" << endl;
        return EXIT_FAILURE;
    }

    //
    // Retrieve the "single" topic, retrying for as long as the replica
    // group loses its majority during the retrieve.
    //
    TopicPrx topic;
    for(;;)
    {
        try
        {
            topic = manager->retrieve("single");
            break;
        }
        catch(const Ice::UnknownException&)
        {
            // Majority lost during retrieve; try again.
        }
        catch(const IceStorm::NoSuchTopic& e)
        {
            cerr << argv[0] << ": NoSuchTopic: " << e.name << endl;
            return EXIT_FAILURE;
        }
    }
    assert(topic);

    //
    // Get a publisher object, create a twoway proxy and then cast to
    // a Single object.
    //
    if(opts.isSet("cycle"))
    {
        //
        // Cycle mode: build one Single proxy per non-UDP endpoint of
        // the publisher and publish events round-robin across them.
        //
        Ice::ObjectPrx publisher = topic->getPublisher()->ice_twoway();
        Ice::EndpointSeq allEndpoints = publisher->ice_getEndpoints();
        vector<SinglePrx> publishers;
        for(Ice::EndpointSeq::const_iterator q = allEndpoints.begin(); q != allEndpoints.end(); ++q)
        {
            if((*q)->toString().substr(0, 3) != "udp")
            {
                Ice::EndpointSeq one;
                one.push_back(*q);
                publishers.push_back(SinglePrx::uncheckedCast(publisher->ice_endpoints(one)));
            }
        }
        if(publishers.size() <= 1)
        {
            cerr << argv[0] << ": Not enough endpoints in publisher proxy" << endl;
            return EXIT_FAILURE;
        }

        int which = 0;
        for(int i = 0; i < 1000; ++i)
        {
            publishers[which]->event(i);
            which = (which + 1) % static_cast<int>(publishers.size());
        }
    }
    else
    {
        // Default mode: publish all events through a single twoway proxy.
        SinglePrx single = SinglePrx::uncheckedCast(topic->getPublisher()->ice_twoway());
        for(int i = 0; i < 1000; ++i)
        {
            single->event(i);
        }
    }
    return EXIT_SUCCESS;
}