This article collects typical usage examples of the C++ TEventBase class. If you have been wondering what TEventBase is for, how to use it, or where to find examples, the curated class examples below may help.
Fifteen TEventBase code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
Example 1: TEST
TEST(ThriftServer, IdleTimeoutAfterTest) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  auto client_channelp = client_channel.get();
  CloseChecker checker;

  client_channel->setCloseCallback(&checker);
  TestServiceAsyncClient client(std::move(client_channel));
  std::string ret;
  client.sync_sendResponse(ret, 20);
  EXPECT_FALSE(checker.getClosed());

  base.tryRunAfterDelay([&base]() {
    base.terminateLoopSoon();
  }, 200);
  base.loopForever();

  EXPECT_TRUE(checker.getClosed());
  client_channelp->setCloseCallback(nullptr);
}
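The CloseChecker helper used above is not shown on this page. Here is a minimal sketch of what it might look like, assuming the channel's close callback interface is a CloseCallback base class with a channelClosed() hook (the names come from the example; the body is an assumption):

// Hypothetical sketch: records whether channelClosed() has fired.
class CloseChecker : public CloseCallback {
 public:
  CloseChecker() : closed_(false) {}
  void channelClosed() override {
    closed_ = true;
  }
  bool getClosed() const {
    return closed_;
  }
 private:
  bool closed_;
};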
Example 2: TEST
/**
 * Test SSL server accept timeout with cache path
 */
TEST(TAsyncSSLSocketTest, SSLServerCacheCloseTest) {
  // Start listening on a local port
  WriteCallbackBase writeCallback;
  ReadCallback readCallback(&writeCallback);
  HandshakeCallback handshakeCallback(&readCallback,
                                      HandshakeCallback::EXPECT_ERROR);
  SSLServerAsyncCacheAcceptCallback acceptCallback(&handshakeCallback);
  TestSSLAsyncCacheServer server(&acceptCallback, 500);

  // Set up SSL client
  TEventBase eventBase;
  std::shared_ptr<SSLClient> client(
      new SSLClient(&eventBase, server.getAddress(), 2, 100));

  client->connect();
  EventBaseAborter eba(&eventBase, 3000);
  eventBase.loop();

  server.getEventBase().runInEventBaseThread(
      [&handshakeCallback] { handshakeCallback.closeSocket(); });
  // give time for the cache lookup to come back and find it closed
  usleep(500000);

  EXPECT_EQ(server.getAsyncCallbacks(), 1);
  EXPECT_EQ(server.getAsyncLookups(), 1);
  EXPECT_EQ(client->getErrors(), 1);
  EXPECT_EQ(client->getMiss(), 1);
  EXPECT_EQ(client->getHit(), 0);

  cerr << "SSLServerCacheCloseTest test completed" << endl;
}
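EventBaseAborter is another helper the example assumes. A plausible sketch, assuming it is a TAsyncTimeout that fails the test and breaks out of the loop if the event base runs longer than the given number of milliseconds (this implementation is an assumption, not the library's code):

// Hypothetical sketch: abort eventBase.loop() after timeoutMS milliseconds.
class EventBaseAborter : public TAsyncTimeout {
 public:
  EventBaseAborter(TEventBase* eventBase, uint32_t timeoutMS)
      : TAsyncTimeout(eventBase), eventBase_(eventBase) {
    scheduleTimeout(timeoutMS);
  }
  void timeoutExpired() noexcept override {
    ADD_FAILURE() << "test timed out";  // gtest: mark failure, keep running
    eventBase_->terminateLoopSoon();
  }
 private:
  TEventBase* eventBase_;
};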
Example 3: TEST
/*
 * Test some timeouts that are scheduled on one timeout set, then moved to
 * another timeout set.
 */
TEST(TAsyncTimeoutSetTest, SwitchTimeoutSet) {
  TEventBase eventBase;
  StackTimeoutSet ts10(&eventBase, milliseconds(10));
  StackTimeoutSet ts5(&eventBase, milliseconds(5));

  TestTimeout t1(&ts5, &ts10, &ts5);
  TestTimeout t2(&ts10, &ts10, &ts5);
  TestTimeout t3(&ts5, &ts5, &ts10, &ts5);

  ts5.scheduleTimeout(&t1);

  TimePoint start;
  eventBase.loop();
  TimePoint end;

  ASSERT_EQ(t1.timestamps.size(), 3);
  ASSERT_EQ(t2.timestamps.size(), 3);
  ASSERT_EQ(t3.timestamps.size(), 4);

  T_CHECK_TIMEOUT(start, t1.timestamps[0], milliseconds(5));
  T_CHECK_TIMEOUT(t1.timestamps[0], t1.timestamps[1], milliseconds(10));
  T_CHECK_TIMEOUT(t1.timestamps[1], t1.timestamps[2], milliseconds(5));

  T_CHECK_TIMEOUT(start, t2.timestamps[0], milliseconds(10));
  T_CHECK_TIMEOUT(t2.timestamps[0], t2.timestamps[1], milliseconds(10));
  T_CHECK_TIMEOUT(t2.timestamps[1], t2.timestamps[2], milliseconds(5));

  T_CHECK_TIMEOUT(start, t3.timestamps[0], milliseconds(5));
  T_CHECK_TIMEOUT(t3.timestamps[0], t3.timestamps[1], milliseconds(5));
  T_CHECK_TIMEOUT(t3.timestamps[1], t3.timestamps[2], milliseconds(10));
  T_CHECK_TIMEOUT(t3.timestamps[2], t3.timestamps[3], milliseconds(5));

  // 10ms fudge factor to account for loaded machines
  T_CHECK_TIMEOUT(start, end, milliseconds(25), milliseconds(10));
}
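TestTimeout is defined elsewhere in the test file. From how it is used, each instance appears to be constructed with the chain of timeout sets it should fire on: it schedules itself on the first set, and on every expiration it records a timestamp and reschedules on the next set (this explains why each timestamp count above equals the constructor's argument count). A reconstruction under those assumptions; only TAsyncTimeoutSet::Callback and its timeoutExpired() hook are taken from the API, the rest is guessed:

#include <deque>

// Hypothetical sketch: fire once per timeout set passed to the constructor,
// recording when each expiration happened.
class TestTimeout : public TAsyncTimeoutSet::Callback {
 public:
  template <typename... Args>
  explicit TestTimeout(Args... sets) : nextSets_{sets...} {
    scheduleNext();  // arm on the first set right away
  }

  void timeoutExpired() noexcept override {
    timestamps.push_back(TimePoint());  // TimePoint() captures "now"
    scheduleNext();
  }

  std::deque<TimePoint> timestamps;

 private:
  void scheduleNext() {
    if (!nextSets_.empty()) {
      TAsyncTimeoutSet* next = nextSets_.front();
      nextSets_.pop_front();
      next->scheduleTimeout(this);
    }
  }
  std::deque<TAsyncTimeoutSet*> nextSets_;
};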
Example 4: TEST
TEST(ThriftServer, OnewayFutureClientTest) {
  using std::chrono::steady_clock;

  ScopedServerThread sst(getServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  FutureServiceAsyncClient client(
      std::unique_ptr<HeaderClientChannel,
                      apache::thrift::async::TDelayedDestruction::Destructor>(
          new HeaderClientChannel(socket)));

  auto future = client.future_noResponse(1000);
  steady_clock::time_point sent = steady_clock::now();
  // wait for future to finish.
  base.loop();
  steady_clock::time_point waited = steady_clock::now();
  future.value();
  steady_clock::time_point got = steady_clock::now();

  steady_clock::duration waitTime = waited - sent;
  steady_clock::duration gotTime = got - waited;
  int factor = 1;
  EXPECT_GE(waitTime, factor * gotTime);
}
Example 5: runRequestContextTest
// Test that when multiple requests are queued waiting for security to be
// established, the RequestContext still flows correctly with each request.
void runRequestContextTest(bool failSecurity) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress(), failSecurity);
  TestServiceAsyncClient client(std::move(channel));
  Countdown c(2, [&base]() { base.terminateLoopSoon(); });

  // Send the first request with a unique RequestContext. This triggers the
  // security handshake; the remaining requests queue behind it.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("first", nullptr);
  client.sendResponse([&base, &client, &c](ClientReceiveState&& state) {
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("first"));
    c.down();
  }, 10);

  // Send another request with a unique RequestContext. This request queues
  // behind the first one inside HeaderClientChannel.
  folly::RequestContext::create();
  folly::RequestContext::get()->setContextData("second", nullptr);
  client.sendResponse([&base, &client, &c](ClientReceiveState&& state) {
    EXPECT_FALSE(folly::RequestContext::get()->hasContextData("first"));
    EXPECT_TRUE(folly::RequestContext::get()->hasContextData("second"));
    c.down();
  }, 10);

  // Only now start looping the event base, which guarantees that all the
  // requests above have queued.
  base.loopForever();
}
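Countdown is a small test helper not shown on this page. A minimal sketch, assuming its only job is to run the supplied function once down() has been called the given number of times:

#include <functional>

// Hypothetical sketch: invoke f once the counter reaches zero.
class Countdown {
 public:
  Countdown(int count, std::function<void()> f)
      : count_(count), f_(std::move(f)) {}
  void down() {
    if (--count_ == 0) {
      f_();
    }
  }
 private:
  int count_;
  std::function<void()> f_;
};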
Example 6: AsyncCpp2Test
void AsyncCpp2Test(bool enable_security) {
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  ScopedServerThread sst(factory.create());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  if (enable_security) {
    client_channel->setSecurityPolicy(THRIFT_SECURITY_PERMITTED);
    client_channel->setSaslClient(std::unique_ptr<SaslClient>(
        new StubSaslClient(socket->getEventBase())));
  }
  TestServiceAsyncClient client(std::move(client_channel));
  boost::polymorphic_downcast<HeaderClientChannel*>(
      client.getChannel())->setTimeout(10000);

  client.sendResponse([&](ClientReceiveState&& state) {
    std::string response;
    try {
      TestServiceAsyncClient::recv_sendResponse(response, state);
    } catch (const std::exception& ex) {
    }
    EXPECT_EQ(response, "test64");
  }, 64);
  base.loop();
}
Example 7: TEST
TEST(Duplex, DuplexTest) {
  enum { START = 1, COUNT = 10, INTERVAL = 5 };

  ScopedServerThread sst(getServer());
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));

  auto duplexChannel =
      std::make_shared<DuplexChannel>(DuplexChannel::Who::CLIENT, socket);
  DuplexServiceAsyncClient client(duplexChannel->getClientChannel());

  bool success = false;
  ThriftServer clients_server(duplexChannel->getServerChannel());
  clients_server.setInterface(std::make_shared<DuplexClientInterface>(
      START, COUNT, success));
  clients_server.serve();

  client.registerForUpdates([](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    bool res = DuplexServiceAsyncClient::recv_registerForUpdates(state);
    EXPECT_TRUE(res);
  }, START, COUNT, INTERVAL);

  // fail on timeout
  base.tryRunAfterDelay([] { EXPECT_TRUE(false); }, 5000);

  base.loopForever();
  EXPECT_TRUE(success);
}
Example 8: TEST
TEST(ThriftServer, CallbackOrderingTest) {
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  auto server = factory.create();
  auto serverHandler = std::make_shared<TestServerEventHandler>();

  TProcessorBase::addProcessorEventHandlerFactory(serverHandler);
  server->setServerEventHandler(serverHandler);

  ScopedServerThread sst(server);
  TEventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  TestServiceAsyncClient client(
      std::unique_ptr<HeaderClientChannel,
                      apache::thrift::async::TDelayedDestruction::Destructor>(
          new HeaderClientChannel(socket)));

  client.noResponse([](ClientReceiveState&& state) {}, 10000);
  base.tryRunAfterDelay([&]() { socket->closeNow(); }, 1);
  base.tryRunAfterDelay([&]() { base.terminateLoopSoon(); }, 20);
  base.loopForever();

  serverHandler->check();
  TProcessorBase::removeProcessorEventHandlerFactory(serverHandler);
}
Example 9: main
int main(int argc, char **argv) {
  TEventBase base;
  int port = 8082;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, "127.0.0.1", port));
  auto client_channel = HeaderClientChannel::newChannel(socket);
  CalculatorAsyncClient client(std::move(client_channel));

  folly::wangle::Future<int64_t> f = client.future_add(2, 3);
  f.then([](Try<int64_t>&& t) {
    std::cout << "Result = " << t.value() << std::endl;
  });

  // Drive the event loop. In a real client this would keep looping until the
  // application decides to terminate it.
  base.loopForever();
}
Example 10: TEST
TEST(RequestContext, SimpleTest) {
  TEventBase base;

  EXPECT_FALSE(RequestContext::create());
  EXPECT_TRUE(RequestContext::create());
  EXPECT_TRUE(RequestContext::get() != nullptr);

  EXPECT_EQ(nullptr, RequestContext::get()->getContextData("test"));
  RequestContext::get()->setContextData(
      "test", std::unique_ptr<TestData>(new TestData(10)));

  base.runInEventBaseThread([&]() {
    EXPECT_TRUE(RequestContext::get() != nullptr);
    auto data = dynamic_cast<TestData*>(
        RequestContext::get()->getContextData("test"))->data_;
    EXPECT_EQ(10, data);
    base.terminateLoopSoon();
  });

  auto th = std::thread([&]() { base.loopForever(); });
  th.join();

  EXPECT_TRUE(RequestContext::get() != nullptr);
  auto a = dynamic_cast<TestData*>(
      RequestContext::get()->getContextData("test"));
  auto data = a->data_;
  EXPECT_EQ(10, data);

  RequestContext::setContext(std::shared_ptr<RequestContext>());
  // There should always be a default context
  EXPECT_TRUE(nullptr != RequestContext::get());
}
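TestData is the only piece of this example not shown. Since setContextData() stores RequestData objects, a minimal sketch (hypothetical, reconstructed from the dynamic_cast and ->data_ accesses above):

// Hypothetical sketch: a RequestData subclass carrying a single int payload.
class TestData : public folly::RequestData {
 public:
  explicit TestData(int data) : data_(data) {}
  int data_;
};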
Example 11: stop
void TEventServer::stop() {
  // TODO: We really need a memory fence or some locking here to ensure that
  // the compiler doesn't optimize out eventBase. In practice, most users will
  // only call stop() when the server is actually serving, so this shouldn't
  // be much of an issue.
  TEventBase* eventBase = serveEventBase_;
  if (eventBase) {
    eventBase->terminateLoopSoon();
  }
}
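The TODO above could be addressed with an atomic pointer. The following is only a sketch of that idea, not the library's actual fix; the class shape and member names merely mirror the snippet:

#include <atomic>

// Sketch: an atomic serveEventBase_ makes the cross-thread read in stop()
// well defined without a lock (terminateLoopSoon() itself is thread-safe).
class TEventServerSketch {
 public:
  void stop() {
    // Acquire load pairs with the release store in serve().
    TEventBase* eventBase = serveEventBase_.load(std::memory_order_acquire);
    if (eventBase) {
      eventBase->terminateLoopSoon();
    }
  }
  void serve() {
    TEventBase base;
    serveEventBase_.store(&base, std::memory_order_release);  // publish
    base.loopForever();
    serveEventBase_.store(nullptr, std::memory_order_release);
  }
 private:
  std::atomic<TEventBase*> serveEventBase_{nullptr};
};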
Example 12: runTest
void runTest(std::function<void(HeaderClientChannel* channel)> setup) {
  ScopedServerThread sst(getServer());
  TEventBase base;
  auto channel = getClientChannel(&base, *sst.getAddress());
  setup(channel.get());

  TestServiceAsyncClient client(std::move(channel));
  Countdown c(3, [&base]() { base.terminateLoopSoon(); });

  client.sendResponse([&base, &client, &c](ClientReceiveState&& state) {
    EXPECT_FALSE(state.isException());
    EXPECT_TRUE(state.isSecurityActive());
    std::string res;
    try {
      TestServiceAsyncClient::recv_sendResponse(res, state);
    } catch (const std::exception&) {
      EXPECT_TRUE(false);
    }
    EXPECT_EQ(res, "10");
    c.down();
  }, 10);

  // fail on timeout
  base.tryRunAfterDelay([] { EXPECT_TRUE(false); }, 5000);

  base.tryRunAfterDelay([&client, &base, &c] {
    client.sendResponse([&base, &c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch (const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
    client.sendResponse([&base, &c](ClientReceiveState&& state) {
      EXPECT_FALSE(state.isException());
      EXPECT_TRUE(state.isSecurityActive());
      std::string res;
      try {
        TestServiceAsyncClient::recv_sendResponse(res, state);
      } catch (const std::exception&) {
        EXPECT_TRUE(false);
      }
      EXPECT_EQ(res, "10");
      c.down();
    }, 10);
  }, 1);

  base.loopForever();
}
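getClientChannel() is shared by Examples 5 and 12 but not shown on this page. A plausible sketch, built only from calls that appear verbatim in Example 6; the address parameter type and the failSecurity handling are assumptions:

// Hypothetical sketch of the helper used in Examples 5 and 12.
std::unique_ptr<HeaderClientChannel,
                apache::thrift::async::TDelayedDestruction::Destructor>
getClientChannel(TEventBase* base,
                 const folly::SocketAddress& address,
                 bool failSecurity = false) {
  auto socket = TAsyncSocket::newSocket(base, address);
  auto channel = HeaderClientChannel::newChannel(socket);
  channel->setSecurityPolicy(THRIFT_SECURITY_PERMITTED);
  channel->setSaslClient(std::unique_ptr<SaslClient>(
      new StubSaslClient(socket->getEventBase())));
  if (failSecurity) {
    // How the stub SASL client is made to fail is test-specific; omitted.
  }
  return channel;
}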
Example 13: async_tm_update
void async_tm_update(unique_ptr<HandlerCallback<int32_t>> callback,
                     int32_t currentIndex) override {
  auto callbackp = callback.release();
  EXPECT_EQ(currentIndex, expectIndex_);
  expectIndex_++;

  TEventBase* eb = callbackp->getEventBase();
  callbackp->resultInThread(currentIndex);

  if (expectIndex_ == lastIndex_) {
    success_ = true;
    eb->runInEventBaseThread([eb] { eb->terminateLoopSoon(); });
  }
}
Example 14: main
int main() {
  TEventBase base;
  const int ports[] = {9090, 9091, 9092, 9093, 9094};
  const char* hosts[] = {"127.0.0.1", "127.0.0.1", "127.0.0.1",
                         "127.0.0.1", "127.0.0.1"};
  unsigned char* images[5];
  int cur_pos = 0;
  std::vector<std::shared_ptr<TAsyncSocket>> sockets;
  std::vector<std::shared_ptr<aobench::cpp2::AobenchServiceAsyncClient>> clients;

  for (int i = 0; i < 5; ++i) {
    std::shared_ptr<TAsyncSocket> socket(
        TAsyncSocket::newSocket(&base, hosts[i], ports[i]));
    sockets.push_back(socket);
    auto client_channel = HeaderClientChannel::newChannel(socket);
    auto client = std::make_shared<aobench::cpp2::AobenchServiceAsyncClient>(
        std::move(client_channel));
    clients.push_back(client);

    client->render(
        [&](ClientReceiveState&& state) {
          std::string result;
          fprintf(stderr, "received\n");
          try {
            aobench::cpp2::AobenchServiceAsyncClient::recv_render(result, state);
            unsigned char* img = new unsigned char[result.size()];
            for (int j = 0; j < static_cast<int>(result.size()); ++j) {
              img[j] = static_cast<unsigned char>(result[j]);
            }
            images[cur_pos] = img;
            ++cur_pos;
            if (cur_pos == 5) {
              saveppm_sum("ao.ppm", 256, 256, images, 5);
              for (int k = 0; k < 5; ++k) {
                delete[] images[k];
              }
              fprintf(stderr, "accumulated\n");
            }
          } catch (const std::exception& ex) {
            fprintf(stderr, "exception thrown %s\n", ex.what());
          }
        }, 256, 256, 2);
  }

  fprintf(stderr, "started\n");
  base.loop();
  fprintf(stderr, "finished\n");
}
Example 15: TEST
TEST(ThriftServer, IdleTimeoutTest) {
  TEventBase base;

  auto port = Server::get(getServer)->getAddress().getPort();
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, "127.0.0.1", port));

  auto client_channel = HeaderClientChannel::newChannel(socket);
  CloseChecker checker;
  client_channel->setCloseCallback(&checker);

  base.runAfterDelay([&base]() {
    base.terminateLoopSoon();
  }, 100);
  base.loopForever();
  EXPECT_TRUE(checker.getClosed());
}