This article collects typical usage examples of the C++ method TEventBase::loopForever. If you are unsure how TEventBase::loopForever is used in practice, the curated code samples below should help; you can also look further into usage examples of its containing class, TEventBase.
The following presents 9 code examples of the TEventBase::loopForever method, sorted by popularity by default.
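Before the examples, a note on the pattern they all share: loopForever() blocks and keeps dispatching events on the TEventBase until terminateLoopSoon() is called, typically from a callback or from a function scheduled with tryRunAfterDelay(). The minimal sketch below is not taken from the examples; the include path and namespace are assumptions based on the fbthrift async library these tests use.
// Minimal sketch (assumed setup, not from the examples below): run a
// TEventBase until a delayed callback asks it to stop.
#include <thrift/lib/cpp/async/TEventBase.h>

using apache::thrift::async::TEventBase;

void runUntilDone() {
  TEventBase base;

  // Schedule work on the event base; real code would start async I/O here.
  base.tryRunAfterDelay([&base] {
    // ... work finished ...
    base.terminateLoopSoon();  // makes loopForever() below return
  }, 100 /* ms */);

  // Blocks and dispatches events until terminateLoopSoon() is called.
  base.loopForever();
}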
Example 1: sst
TEST(ThriftServer, CallbackOrderingTest) {
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  auto server = factory.create();
  auto serverHandler = std::make_shared<TestServerEventHandler>();
  TProcessorBase::addProcessorEventHandlerFactory(serverHandler);
  server->setServerEventHandler(serverHandler);
  ScopedServerThread sst(server);
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  TestServiceAsyncClient client(
      std::unique_ptr<HeaderClientChannel,
                      apache::thrift::async::TDelayedDestruction::Destructor>(
          new HeaderClientChannel(socket)));
  // Send a one-way call, close the socket shortly afterwards, then stop the loop.
  client.noResponse([](ClientReceiveState&& state){}, 10000);
  base.tryRunAfterDelay([&](){
    socket->closeNow();
  }, 1);
  base.tryRunAfterDelay([&](){
    base.terminateLoopSoon();
  }, 20);
  base.loopForever();
  // Verify the processor event handler callbacks fired in the expected order.
  serverHandler->check();
  TProcessorBase::removeProcessorEventHandlerFactory(serverHandler);
}
Example 2: runRequestContextTest
// Test that when multiple requests are queued waiting for security to be
// established, the RequestContext still flows correctly with each request.
void runRequestContextTest(bool failSecurity) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress(), failSecurity);
  TestServiceAsyncClient client(std::move(channel));
  Countdown c(2, [&base](){ base.terminateLoopSoon(); });

  // Send the first request with a unique RequestContext. This triggers the
  // security handshake; the remaining requests queue behind it.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("first", nullptr);
  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("first"));
    c.down();
  }, 10);

  // Send another request with a unique RequestContext. This request queues
  // behind the first one inside HeaderClientChannel.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("second", nullptr);
  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_FALSE(folly::RequestContext::get()->hasContextData("first"));
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("second"));
    c.down();
  }, 10);

  // Only start looping the event base now, to guarantee that both requests
  // above are queued before either is dispatched.
  base.loopForever();
}
Example 3: sst
TEST(Duplex, DuplexTest) {
  enum { START = 1, COUNT = 10, INTERVAL = 5 };
  ScopedServerThread sst(getServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  auto duplexChannel =
      std::make_shared<DuplexChannel>(DuplexChannel::Who::CLIENT, socket);
  DuplexServiceAsyncClient client(duplexChannel->getClientChannel());

  bool success = false;
  ThriftServer clients_server(duplexChannel->getServerChannel());
  clients_server.setInterface(std::make_shared<DuplexClientInterface>(
      START, COUNT, success));
  clients_server.serve();

  client.registerForUpdates([](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    bool res = DuplexServiceAsyncClient::recv_registerForUpdates(state);
    EXPECT_TRUE(res);
  }, START, COUNT, INTERVAL);

  // fail on time out
  base.tryRunAfterDelay([] { EXPECT_TRUE(false); }, 5000);

  base.loopForever();
  EXPECT_TRUE(success);
}
Example 4: TestData
TEST(RequestContext, SimpleTest) {
  TEventBase base;
  EXPECT_FALSE(RequestContext::create());
  EXPECT_TRUE(RequestContext::create());
  EXPECT_TRUE(RequestContext::get() != nullptr);
  EXPECT_EQ(nullptr, RequestContext::get()->getContextData("test"));
  RequestContext::get()->setContextData(
      "test",
      std::unique_ptr<TestData>(new TestData(10)));
  // The callback runs on the event-base thread and should still see the
  // context data set above.
  base.runInEventBaseThread([&](){
    EXPECT_TRUE(RequestContext::get() != nullptr);
    auto data = dynamic_cast<TestData*>(
        RequestContext::get()->getContextData("test"))->data_;
    EXPECT_EQ(10, data);
    base.terminateLoopSoon();
  });
  auto th = std::thread([&](){
    base.loopForever();
  });
  th.join();
  EXPECT_TRUE(RequestContext::get() != nullptr);
  auto a = dynamic_cast<TestData*>(
      RequestContext::get()->getContextData("test"));
  auto data = a->data_;
  EXPECT_EQ(10, data);
  RequestContext::setContext(std::shared_ptr<RequestContext>());
  // There should always be a default context
  EXPECT_TRUE(nullptr != RequestContext::get());
}
Example 5: sst
TEST(ThriftServer, IdleTimeoutAfterTest) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  auto client_channel = HeaderClientChannel::newChannel(socket);
  auto client_channelp = client_channel.get();
  CloseChecker checker;
  client_channel->setCloseCallback(&checker);
  TestServiceAsyncClient client(std::move(client_channel));
  std::string ret;
  client.sync_sendResponse(ret, 20);
  EXPECT_FALSE(checker.getClosed());
  // Give the server's idle timeout a chance to fire, then stop the loop.
  base.tryRunAfterDelay([&base](){
    base.terminateLoopSoon();
  }, 200);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
  client_channelp->setCloseCallback(nullptr);
}
Example 6: main
int main(int argc, char **argv) {
  TEventBase base;
  int port = 8082;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, "127.0.0.1", port));
  auto client_channel = HeaderClientChannel::newChannel(socket);
  CalculatorAsyncClient client(std::move(client_channel));

  folly::wangle::Future<int64_t> f = client.future_add(2, 3);
  f.then(
      [](Try<int64_t>&& t) {
        std::cout << "Result = " << t.value() << std::endl;
      });

  // Drive the event loop. loopForever() blocks until terminateLoopSoon() is
  // called; a real client would stop the loop once its work is done.
  base.loopForever();
}
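If the client above should exit once the result has been printed, the loop can be stopped from the callback itself. The fragment below is a sketch of that variation of the tail of Example 6, not part of the original; it assumes the same setup and captures the event base by reference.
// Sketch only: stop the loop from the future callback so main() returns
// after the result is printed.
f.then([&base](Try<int64_t>&& t) {
  std::cout << "Result = " << t.value() << std::endl;
  base.terminateLoopSoon();  // unblocks the loopForever() call below
});
base.loopForever();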
Example 7: runTest
void runTest(std::function<void(HeaderClientChannel* channel)> setup) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress());
  setup(channel.get());

  TestServiceAsyncClient client(std::move(channel));
  Countdown c(3, [&base](){ base.terminateLoopSoon(); });

  client.sendResponse([&base,&client,&c](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    EXPECT_TRUE(state.isSecurityActive());
    std::string res;
    try {
      TestServiceAsyncClient::recv_sendResponse(res, state);
    } catch(const std::exception&) {
      EXPECT_TRUE(false);
    }
    EXPECT_EQ(res, "10");
    c.down();
  }, 10);

  // fail on time out
  base.tryRunAfterDelay([] { EXPECT_TRUE(false); }, 5000);

  base.tryRunAfterDelay([&client,&base,&c] {
    client.sendResponse([&base,&c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch(const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
    client.sendResponse([&base,&c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch(const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
  }, 1);

  base.loopForever();
}
Example 8: socket
TEST(ThriftServer, IdleTimeoutTest) {
  TEventBase base;
  auto port = Server::get(getServer)->getAddress().getPort();
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, "127.0.0.1", port));
  auto client_channel = HeaderClientChannel::newChannel(socket);
  CloseChecker checker;
  client_channel->setCloseCallback(&checker);
  // With no requests sent, the server's idle timeout should close the
  // connection before the loop is stopped below.
  base.runAfterDelay([&base](){
    base.terminateLoopSoon();
  }, 100);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
}
Example 9: duplexTest
void duplexTest(const apache::thrift::SecurityMech mech) {
  enum { START = 1, COUNT = 3, INTERVAL = 1 };
  ScopedServerThread duplexsst(getDuplexServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *duplexsst.getAddress()));
  auto duplexChannel =
      std::make_shared<DuplexChannel>(DuplexChannel::Who::CLIENT, socket);
  enableSecurity(duplexChannel->getClientChannel().get(), mech);
  DuplexServiceAsyncClient client(duplexChannel->getClientChannel());

  bool success = false;
  ThriftServer clients_server(duplexChannel->getServerChannel());
  clients_server.setInterface(std::make_shared<DuplexClientInterface>(
      START, COUNT, success));
  clients_server.serve();

  client.registerForUpdates([](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    EXPECT_TRUE(state.isSecurityActive());
    try {
      bool res = DuplexServiceAsyncClient::recv_registerForUpdates(state);
      EXPECT_TRUE(res);
    } catch (const std::exception&) {
      EXPECT_TRUE(false);
    }
  }, START, COUNT, INTERVAL);

  // fail on time out
  base.tryRunAfterDelay([] { EXPECT_TRUE(false); }, 5000);

  base.loopForever();
  EXPECT_TRUE(success);
}