This article collects typical usage examples of the C++ method EventBase::loop. If you are unsure what EventBase::loop does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the EventBase class that this method belongs to.
The following shows 15 code examples of EventBase::loop drawn from open-source projects, ordered roughly by popularity.
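All of the examples share the same basic pattern: work is registered on a folly EventBase (loop callbacks, timeouts, or fd handlers), and EventBase::loop() runs the event loop until nothing is left to do, then returns. As a warm-up, here is a minimal, self-contained sketch of that pattern (my own illustration assuming a standard folly installation; it is not taken from the examples below):

#include <folly/io/async/EventBase.h>
#include <iostream>

int main() {
  folly::EventBase evb;

  // Queue a callback for the next loop iteration. loop() returns once no
  // callbacks, timeouts, or registered fd handlers remain.
  evb.runInLoop([] { std::cout << "ran inside the loop\n"; });

  // Schedule a one-shot timeout as well; loop() will wait for it to fire.
  evb.runAfterDelay([] { std::cout << "timeout fired\n"; }, 20 /* ms */);

  evb.loop();  // blocks until both callbacks have run, then returns
  return 0;
}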
Example 1: callback
/**
 * Test that EventBase::loop() correctly detects when there are no more events
 * left to run.
 *
 * This uses a single callback, which alternates registering itself as a loop
 * callback versus an EventHandler callback. This exercises a regression where
 * EventBase::loop() incorrectly exited if there were no more fd handlers
 * registered, but a loop callback installed a new fd handler.
 */
TEST(EventBaseTest, LoopTermination) {
  EventBase eventBase;
  // Open a pipe and close the write end,
  // so the read endpoint will be readable
  int pipeFds[2];
  int rc = pipe(pipeFds);
  ASSERT_EQ(rc, 0);
  close(pipeFds[1]);
  TerminateTestCallback callback(&eventBase, pipeFds[0]);
  // Test once where the callback will exit after a loop callback
  callback.reset(10, 100);
  eventBase.runInLoop(&callback);
  eventBase.loop();
  ASSERT_EQ(callback.getLoopInvocations(), 10);
  ASSERT_EQ(callback.getEventInvocations(), 9);
  // Test once where the callback will exit after an fd event callback
  callback.reset(100, 7);
  eventBase.runInLoop(&callback);
  eventBase.loop();
  ASSERT_EQ(callback.getLoopInvocations(), 7);
  ASSERT_EQ(callback.getEventInvocations(), 7);
  close(pipeFds[0]);
}
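The TerminateTestCallback helper is not included in this listing. Based on how the test uses it, a rough sketch of such a helper might look like the following (a hypothetical reconstruction assuming folly's EventBase::LoopCallback and EventHandler interfaces, with EventHandler taking a raw fd as in older folly versions; the counting details are my own guess that happens to match the assertions above):

#include <folly/io/async/EventBase.h>
#include <folly/io/async/EventHandler.h>
#include <cstdint>

// Hypothetical reconstruction; not the actual helper from the project's tests.
class TerminateTestCallback : public folly::EventBase::LoopCallback,
                              public folly::EventHandler {
 public:
  TerminateTestCallback(folly::EventBase* eventBase, int fd)
      : EventHandler(eventBase, fd), eventBase_(eventBase) {}

  void reset(uint32_t maxLoopInvocations, uint32_t maxEventInvocations) {
    maxLoopInvocations_ = maxLoopInvocations;
    maxEventInvocations_ = maxEventInvocations;
    loopInvocations_ = 0;
    eventInvocations_ = 0;
  }

  // Runs as a loop callback: install an fd handler so the loop has a reason
  // to keep going even though no loop callbacks remain.
  void runLoopCallback() noexcept override {
    ++loopInvocations_;
    if (loopInvocations_ >= maxLoopInvocations_) {
      return;
    }
    registerHandler(EventHandler::READ);
  }

  // Runs as an fd handler (the read end of the pipe is always readable once
  // the write end is closed): hand control back to a loop callback.
  void handlerReady(uint16_t /*events*/) noexcept override {
    unregisterHandler();
    ++eventInvocations_;
    if (eventInvocations_ >= maxEventInvocations_) {
      return;
    }
    eventBase_->runInLoop(this);
  }

  uint32_t getLoopInvocations() const { return loopInvocations_; }
  uint32_t getEventInvocations() const { return eventInvocations_; }

 private:
  folly::EventBase* eventBase_;
  uint32_t maxLoopInvocations_{0};
  uint32_t maxEventInvocations_{0};
  uint32_t loopInvocations_{0};
  uint32_t eventInvocations_{0};
};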
Example 2: serverThread
TEST(TEventServerTest, ExplicitHeaderProtocolAndTransport) {
  // Initialize thrift service
  EventBase eventBase;
  auto handler = make_shared<TEventServerServiceHandler>(&eventBase);
  auto processor = make_shared<TEventServerTestServiceAsyncProcessor>(handler);
  auto headerProtocolFactory = make_shared<THeaderProtocolFactory>();
  int serverPort = 0;
  auto server =
      make_shared<TEventServer>(processor, headerProtocolFactory, serverPort);
  server->setTransportType(TEventServer::HEADER);
  ScopedServerThread serverThread(server);
  auto address = *serverThread.getAddress();
  auto socket = TAsyncSocket::newSocket(&eventBase, address);
  auto channel = THeaderAsyncChannel::newChannel(socket);
  auto protocolFactory = make_shared<THeaderProtocolFactory>();
  auto cl = make_shared<TEventServerTestServiceCobClient>(
      channel, protocolFactory.get());
  cl->noop(bind(responseReceived, placeholders::_1, true));
  eventBase.loop();
  serverThread.stop();
  eventBase.loop();
}
Example 3: readCallback
/**
 * Test a full unencrypted codepath
 */
TEST(AsyncSSLSocketTest, UnencryptedTest) {
  EventBase base;
  auto clientCtx = std::make_shared<folly::SSLContext>();
  auto serverCtx = std::make_shared<folly::SSLContext>();
  int fds[2];
  getfds(fds);
  getctx(clientCtx, serverCtx);
  auto client = AsyncSSLSocket::newSocket(
      clientCtx, &base, fds[0], false, true);
  auto server = AsyncSSLSocket::newSocket(
      serverCtx, &base, fds[1], true, true);
  ReadCallbackTerminator readCallback(&base, nullptr);
  server->setReadCB(&readCallback);
  readCallback.setSocket(server);
  uint8_t buf[128];
  memset(buf, 'a', sizeof(buf));
  client->write(nullptr, buf, sizeof(buf));
  // Check that bytes are unencrypted
  char c;
  EXPECT_EQ(1, recv(fds[1], &c, 1, MSG_PEEK));
  EXPECT_EQ('a', c);
  EventBaseAborter eba(&base, 3000);
  base.loop();
  EXPECT_EQ(1, readCallback.buffers.size());
  EXPECT_EQ(AsyncSSLSocket::STATE_UNENCRYPTED, client->getSSLState());
  server->setReadCB(&readCallback);
  // Unencrypted
  server->sslAccept(nullptr);
  client->sslConn(nullptr);
  // Do NOT wait for the handshake; the write should be queued and happen
  // after it completes
  client->write(nullptr, buf, sizeof(buf));
  // Check that bytes are *not* unencrypted
  char c2;
  EXPECT_EQ(1, recv(fds[1], &c2, 1, MSG_PEEK));
  EXPECT_NE('a', c2);
  base.loop();
  EXPECT_EQ(2, readCallback.buffers.size());
  EXPECT_EQ(AsyncSSLSocket::STATE_ESTABLISHED, client->getSSLState());
}
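The getfds() and getctx() helpers are not part of this listing. getfds() presumably produces a connected, non-blocking socket pair so the client and server sockets can talk in-process; a hypothetical sketch of such a helper (not the project's actual implementation) could be:

#include <fcntl.h>
#include <sys/socket.h>
#include <cstdio>
#include <cstdlib>

// Create a connected AF_UNIX socket pair and make both ends non-blocking,
// so async socket objects can be wrapped around the raw fds.
void getfds(int fds[2]) {
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) {
    perror("socketpair");
    abort();
  }
  for (int idx = 0; idx < 2; ++idx) {
    int flags = fcntl(fds[idx], F_GETFL, 0);
    if (flags == -1 || fcntl(fds[idx], F_SETFL, flags | O_NONBLOCK) != 0) {
      perror("fcntl");
      abort();
    }
  }
}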
Example 4: clientSock
/**
 * Test requireClientCert with no client cert
 */
TEST(AsyncSSLSocketTest, NoClientCertHandshakeError) {
  EventBase eventBase;
  auto clientCtx = std::make_shared<SSLContext>();
  auto serverCtx = std::make_shared<SSLContext>();
  serverCtx->setVerificationOption(
      SSLContext::SSLVerifyPeerEnum::VERIFY_REQ_CLIENT_CERT);
  serverCtx->ciphers("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
  serverCtx->loadPrivateKey(testKey);
  serverCtx->loadCertificate(testCert);
  serverCtx->loadTrustedCertificates(testCA);
  serverCtx->loadClientCAList(testCA);
  clientCtx->setVerificationOption(SSLContext::SSLVerifyPeerEnum::NO_VERIFY);
  clientCtx->ciphers("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
  int fds[2];
  getfds(fds);
  AsyncSSLSocket::UniquePtr clientSock(
      new AsyncSSLSocket(clientCtx, &eventBase, fds[0], false));
  AsyncSSLSocket::UniquePtr serverSock(
      new AsyncSSLSocket(serverCtx, &eventBase, fds[1], true));
  SSLHandshakeClient client(std::move(clientSock), false, false);
  SSLHandshakeServer server(std::move(serverSock), false, false);
  eventBase.loop();
  EXPECT_FALSE(server.handshakeVerify_);
  EXPECT_FALSE(server.handshakeSuccess_);
  EXPECT_TRUE(server.handshakeError_);
  EXPECT_LE(0, client.handshakeTime.count());
  EXPECT_LE(0, server.handshakeTime.count());
}
Example 5: acceptor
TEST(AcceptorTest, Basic) {
  EventBase base;
  auto socket = AsyncServerSocket::newSocket(&base);
  ServerSocketConfig config;
  TestAcceptor acceptor(config);
  socket->addAcceptCallback(&acceptor, &base);
  acceptor.init(socket.get(), &base);
  socket->bind(0);
  socket->listen(100);
  SocketAddress addy;
  socket->getAddress(&addy);
  socket->startAccepting();
  auto client_socket = AsyncSocket::newSocket(
      &base, addy);
  base.loopForever();
  CHECK_EQ(acceptor.getNumConnections(), 1);
  CHECK(acceptor.getState() == Acceptor::State::kRunning);
  acceptor.forceStop();
  socket->stopAccepting();
  base.loop();
}
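Note that this example uses loopForever() rather than loop(): loopForever() keeps running even when no work is pending and only returns after terminateLoopSoon() is called (here presumably by one of the test helpers once the connection is accepted; those helpers are not shown). A minimal, standalone sketch of that pattern, independent of the Acceptor classes:

#include <folly/io/async/EventBase.h>

int main() {
  folly::EventBase base;

  // Something running inside the loop eventually asks it to stop; without
  // this, loopForever() would never return even once the loop becomes idle.
  base.runAfterDelay([&base] { base.terminateLoopSoon(); }, 50 /* ms */);

  base.loopForever();  // returns only after terminateLoopSoon()
  return 0;
}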
Example 6:
TEST(AsyncTimeout, cancel_schedule) {
  int value = 0;
  int const expected = 10;
  EventBase manager;
  std::unique_ptr<AsyncTimeout> observer;
  std::weak_ptr<RequestContext> rctx_weak_ptr;
  {
    RequestContextScopeGuard rctx_guard;
    rctx_weak_ptr = RequestContext::saveContext();
    observer = AsyncTimeout::schedule(
        std::chrono::milliseconds(100), manager, [&]() noexcept {
          value = expected;
        });
    observer->cancelTimeout();
  }
  // Ensure that the RequestContext created for the scope has been released
  // and deleted.
  EXPECT_EQ(rctx_weak_ptr.expired(), true);
  manager.loop();
  EXPECT_NE(expected, value);
}
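The test above cancels the timeout, so loop() returns without ever running the lambda. For contrast, here is a minimal sketch (my own, using the same AsyncTimeout::schedule call seen above) where the timeout is left scheduled and loop() waits for it to fire:

#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>
#include <cassert>

int main() {
  folly::EventBase evb;
  int value = 0;

  // Keep the returned handle alive; destroying it would cancel the timeout.
  auto timeout = folly::AsyncTimeout::schedule(
      std::chrono::milliseconds(10), evb, [&]() noexcept { value = 42; });

  evb.loop();  // waits ~10ms for the pending timeout, then runs the lambda
  assert(value == 42);
  return 0;
}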
Example 7: eh
TEST_F(EventHandlerTest, many_concurrent_producers) {
  const size_t writes = 200;
  const size_t nproducers = 20;
  size_t readsRemaining = writes;
  runInThreadsAndWait({
      [&] {
        EventBase eb;
        EventHandlerMock eh(&eb, efd);
        eh.registerHandler(EventHandler::READ | EventHandler::PERSIST);
        EXPECT_CALL(eh, _handlerReady(_))
            .Times(writes)
            .WillRepeatedly(Invoke([&](uint16_t events) {
              efd_read();
              if (--readsRemaining == 0) {
                eh.unregisterHandler();
              }
            }));
        eb.loop();
      },
      [&] {
        runInThreadsAndWait(nproducers, [&](size_t k) {
          for (size_t i = 0; i < writes / nproducers; ++i) {
            this_thread::sleep_for(chrono::milliseconds(1));
            efd_write(1);
          }
        });
      },
  });
  EXPECT_EQ(0, readsRemaining);
}
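The efd member and the efd_read()/efd_write() helpers come from the EventHandlerTest fixture, which is not shown here. They most likely wrap a Linux eventfd; a hypothetical sketch of such a fixture (the mode flags and wrapper names are assumptions, not the project's code):

#include <sys/eventfd.h>
#include <unistd.h>
#include <cstdint>
#include <gtest/gtest.h>

// Hypothetical fixture: an eventfd in semaphore mode lets many producer
// threads post events that a single EventHandler drains one at a time.
class EventHandlerTest : public ::testing::Test {
 protected:
  int efd = -1;

  void SetUp() override {
    efd = eventfd(0, EFD_NONBLOCK | EFD_SEMAPHORE);
    ASSERT_GE(efd, 0);
  }
  void TearDown() override { close(efd); }

  void efd_write(uint64_t n) {
    ssize_t rc = write(efd, &n, sizeof(n));
    (void)rc;
  }
  uint64_t efd_read() {
    uint64_t n = 0;
    ssize_t rc = read(efd, &n, sizeof(n));
    (void)rc;
    return n;
  }
};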
Example 8: sst
TEST(ThriftServer, OnewayFutureClientTest) {
  using std::chrono::steady_clock;
  apache::thrift::TestThriftServerFactory<TestInterface> factory;
  ScopedServerThread sst(factory.create());
  EventBase base;
  std::shared_ptr<TAsyncSocket> socket(
      TAsyncSocket::newSocket(&base, *sst.getAddress()));
  auto channel = HeaderClientChannel::newChannel(socket);
  FutureServiceAsyncClient client(std::move(channel));
  auto future = client.future_noResponse(100);
  steady_clock::time_point sent = steady_clock::now();
  // wait for future to finish.
  base.loop();
  steady_clock::time_point waited = steady_clock::now();
  future.value();
  steady_clock::time_point got = steady_clock::now();
  steady_clock::duration waitTime = waited - sent;
  steady_clock::duration gotTime = got - waited;
  int factor = 1;
  EXPECT_GE(waitTime, factor * gotTime);
  // The client returns quickly because the call is oneway; sleep for a while
  // so the server has not already been stopped by the time the request
  // reaches it.
  // Also consider using a Baton if this is still flaky under stress runs.
  /* sleep override */ std::this_thread::sleep_for(
      std::chrono::milliseconds(200));
}
Example 9: t
/**
 * Test rescheduling the same timeout multiple times
 */
TEST(EventBaseTest, ReuseTimeout) {
  EventBase eb;
  vector<uint32_t> timeouts;
  timeouts.push_back(10);
  timeouts.push_back(30);
  timeouts.push_back(15);
  ReschedulingTimeout t(&eb, timeouts);
  t.start();
  TimePoint start;
  eb.loop();
  TimePoint end;
  // Use a higher tolerance than usual. We're waiting on 3 timeouts
  // consecutively. In general, each timeout may go over by a few
  // milliseconds, and we're tripling this error by waiting on 3 timeouts.
  milliseconds tolerance{6};
  ASSERT_EQ(timeouts.size(), t.timestamps.size());
  uint32_t total = 0;
  for (size_t n = 0; n < timeouts.size(); ++n) {
    total += timeouts[n];
    T_CHECK_TIMEOUT(start, t.timestamps[n], milliseconds(total), tolerance);
  }
  T_CHECK_TIMEOUT(start, end, milliseconds(total), tolerance);
}
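ReschedulingTimeout is another test helper that is not reproduced in this listing. Judging from its use above, it schedules itself once per entry in the delay list and records when each timeout fires. A hypothetical sketch, recording plain steady_clock time points instead of the TimePoint test utility used in the assertions:

#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>
#include <chrono>
#include <vector>

class ReschedulingTimeout : public folly::AsyncTimeout {
 public:
  ReschedulingTimeout(folly::EventBase* evb, std::vector<uint32_t> timeouts)
      : AsyncTimeout(evb), timeouts_(std::move(timeouts)) {}

  // Schedule the first delay; each expiration schedules the next one.
  void start() { scheduleNext(); }

  void timeoutExpired() noexcept override {
    timestamps.push_back(std::chrono::steady_clock::now());
    scheduleNext();
  }

  std::vector<std::chrono::steady_clock::time_point> timestamps;

 private:
  void scheduleNext() {
    if (index_ < timeouts_.size()) {
      scheduleTimeout(timeouts_[index_++]);
    }
    // Once the list is exhausted nothing is scheduled, so loop() can return.
  }

  std::vector<uint32_t> timeouts_;
  size_t index_ = 0;
};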
Example 10: milliseconds
/**
 * Test rescheduling a timeout before it has fired
 */
TEST(EventBaseTest, RescheduleTimeout) {
  EventBase eb;
  TestTimeout t1(&eb);
  TestTimeout t2(&eb);
  TestTimeout t3(&eb);
  t1.scheduleTimeout(15);
  t2.scheduleTimeout(30);
  t3.scheduleTimeout(30);
  auto f = static_cast<bool(AsyncTimeout::*)(uint32_t)>(
      &AsyncTimeout::scheduleTimeout);
  // after 10ms, reschedule t2 to run sooner than originally scheduled
  eb.tryRunAfterDelay(std::bind(f, &t2, 10), 10);
  // after 10ms, reschedule t3 to run later than originally scheduled
  eb.tryRunAfterDelay(std::bind(f, &t3, 40), 10);
  TimePoint start;
  eb.loop();
  TimePoint end;
  T_CHECK_TIMEOUT(start, t1.timestamp, milliseconds(15));
  T_CHECK_TIMEOUT(start, t2.timestamp, milliseconds(20));
  T_CHECK_TIMEOUT(start, t3.timestamp, milliseconds(50));
  T_CHECK_TIMEOUT(start, end, milliseconds(50));
}
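TestTimeout (and the TimePoint/T_CHECK_TIMEOUT utilities) are test helpers that are not part of this listing. TestTimeout is essentially an AsyncTimeout that remembers when it fired; a hypothetical sketch using std::chrono in place of the TimePoint helper:

#include <folly/io/async/AsyncTimeout.h>
#include <folly/io/async/EventBase.h>
#include <chrono>

class TestTimeout : public folly::AsyncTimeout {
 public:
  explicit TestTimeout(folly::EventBase* evb) : AsyncTimeout(evb) {}

  // Record the time at which the timeout actually fired, so the test can
  // compare it against the expected delay.
  void timeoutExpired() noexcept override {
    timestamp = std::chrono::steady_clock::now();
  }

  std::chrono::steady_clock::time_point timestamp;
};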
Example 11: timeoutExpired
/**
 * Test destroying a scheduled timeout object
 */
TEST(EventBaseTest, DestroyTimeout) {
  class DestroyTimeout : public AsyncTimeout {
   public:
    DestroyTimeout(EventBase* eb, AsyncTimeout* t)
        : AsyncTimeout(eb)
        , timeout_(t) {}
    virtual void timeoutExpired() noexcept {
      delete timeout_;
    }
   private:
    AsyncTimeout* timeout_;
  };
  EventBase eb;
  TestTimeout* t1 = new TestTimeout(&eb);
  t1->scheduleTimeout(30);
  DestroyTimeout dt(&eb, t1);
  dt.scheduleTimeout(10);
  TimePoint start;
  eb.loop();
  TimePoint end;
  T_CHECK_TIMEOUT(start, end, milliseconds(10));
}
Example 12: handshakeCallback
/**
 * Test SSL client socket timeout
 */
TEST(AsyncSSLSocketTest, SSLClientTimeoutTest) {
  // Start listening on a local port
  EmptyReadCallback readCallback;
  HandshakeCallback handshakeCallback(&readCallback,
                                      HandshakeCallback::EXPECT_ERROR);
  HandshakeTimeoutCallback acceptCallback(&handshakeCallback);
  TestSSLServer server(&acceptCallback);
  // Set up SSL client
  EventBase eventBase;
  auto client =
      std::make_shared<SSLClient>(&eventBase, server.getAddress(), 1, 10);
  client->connect(true /* write before connect completes */);
  EventBaseAborter eba(&eventBase, 3000);
  eventBase.loop();
  usleep(100000);
  // This is checking that the connectError callback precedes any queued
  // writeError callbacks. This matches AsyncSocket's behavior.
  EXPECT_EQ(client->getWriteAfterConnectErrors(), 1);
  EXPECT_EQ(client->getErrors(), 1);
  EXPECT_EQ(client->getMiss(), 0);
  EXPECT_EQ(client->getHit(), 0);
  cerr << "SSLClientTimeoutTest test completed" << endl;
}
Example 13: handler
/**
 * Test (READ | WRITE | PERSIST)
 */
TEST(EventBaseTest, ReadWritePersist) {
  EventBase eb;
  SocketPair sp;
  // Register for read and write events
  TestHandler handler(&eb, sp[0]);
  handler.registerHandler(EventHandler::READ | EventHandler::WRITE |
                          EventHandler::PERSIST);
  // Register timeouts to perform several reads and writes
  ScheduledEvent events[] = {
      { 10, EventHandler::WRITE, 2345 },
      { 20, EventHandler::READ, 0 },
      { 35, EventHandler::WRITE, 200 },
      { 45, EventHandler::WRITE, 15 },
      { 55, EventHandler::READ, 0 },
      { 120, EventHandler::WRITE, 2345 },
      { 0, 0, 0 },
  };
  scheduleEvents(&eb, sp[1], events);
  // Schedule a timeout to unregister the handler
  eb.tryRunAfterDelay(std::bind(&TestHandler::unregisterHandler, &handler), 80);
  // Loop
  TimePoint start;
  eb.loop();
  TimePoint end;
  ASSERT_EQ(handler.log.size(), 6);
  // Since we didn't fill up the write buffer immediately, there should
  // be an immediate event for writability.
  ASSERT_EQ(handler.log[0].events, EventHandler::WRITE);
  T_CHECK_TIMEOUT(start, handler.log[0].timestamp, milliseconds(0));
  ASSERT_EQ(handler.log[0].bytesRead, 0);
  ASSERT_GT(handler.log[0].bytesWritten, 0);
  // Events 1 through 5 should correspond to the scheduled events
  for (int n = 1; n < 6; ++n) {
    ScheduledEvent* event = &events[n - 1];
    T_CHECK_TIMEOUT(start, handler.log[n].timestamp,
                    milliseconds(event->milliseconds));
    if (event->events == EventHandler::READ) {
      ASSERT_EQ(handler.log[n].events, EventHandler::WRITE);
      ASSERT_EQ(handler.log[n].bytesRead, 0);
      ASSERT_GT(handler.log[n].bytesWritten, 0);
    } else {
      ASSERT_EQ(handler.log[n].events, EventHandler::READ);
      ASSERT_EQ(handler.log[n].bytesRead, event->length);
      ASSERT_EQ(handler.log[n].bytesWritten, 0);
    }
  }
  // The timeout should have unregistered the handler before the last write.
  // Make sure that data is still waiting to be read
  size_t bytesRemaining = readUntilEmpty(sp[0]);
  ASSERT_EQ(bytesRemaining, events[5].length);
}
Example 14: timeouts
/**
 * Verify that idle time is correctly accounted for when decaying our loop
 * time.
 *
 * This works by creating a high loop time (via usleep), expecting a latency
 * callback with known value, and then scheduling a timeout for later. This
 * later timeout is far enough in the future that the idle time should have
 * caused the loop time to decay.
 */
TEST(EventBaseTest, IdleTime) {
  EventBase eventBase;
  eventBase.setLoadAvgMsec(1000);
  eventBase.resetLoadAvg(5900.0);
  std::deque<uint64_t> timeouts0(4, 8080);
  timeouts0.push_front(8000);
  timeouts0.push_back(14000);
  IdleTimeTimeoutSeries tos0(&eventBase, timeouts0);
  std::deque<uint64_t> timeouts(20, 20);
  std::unique_ptr<IdleTimeTimeoutSeries> tos;
  int64_t testStart = duration_cast<microseconds>(
      std::chrono::steady_clock::now().time_since_epoch()).count();
  bool hostOverloaded = false;
  int latencyCallbacks = 0;
  eventBase.setMaxLatency(6000, [&]() {
    ++latencyCallbacks;
    switch (latencyCallbacks) {
      case 1:
        if (tos0.getTimeouts() < 6) {
          // This could only happen if the host this test is running
          // on is heavily loaded.
          int64_t maxLatencyReached = duration_cast<microseconds>(
              std::chrono::steady_clock::now().time_since_epoch()).count();
          ASSERT_LE(43800, maxLatencyReached - testStart);
          hostOverloaded = true;
          break;
        }
        ASSERT_EQ(6, tos0.getTimeouts());
        ASSERT_GE(6100, eventBase.getAvgLoopTime() - 1200);
        ASSERT_LE(6100, eventBase.getAvgLoopTime() + 1200);
        tos.reset(new IdleTimeTimeoutSeries(&eventBase, timeouts));
        break;
      default:
        FAIL() << "Unexpected latency callback";
        break;
    }
  });
  // Kick things off with an "immediate" timeout
  tos0.scheduleTimeout(1);
  eventBase.loop();
  if (hostOverloaded) {
    return;
  }
  ASSERT_EQ(1, latencyCallbacks);
  ASSERT_EQ(7, tos0.getTimeouts());
  ASSERT_GE(5900, eventBase.getAvgLoopTime() - 1200);
  ASSERT_LE(5900, eventBase.getAvgLoopTime() + 1200);
  ASSERT_TRUE(!!tos);
  ASSERT_EQ(21, tos->getTimeouts());
}
Example 15: c
BENCHMARK(timeMeasurementsOn, n) {
  EventBase eventBase;
  while (n--) {
    CountedLoopCallback c(&eventBase, 10);
    eventBase.runInLoop(&c);
    eventBase.loop();
  }
}
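CountedLoopCallback is not shown in this listing either; it is presumably a loop callback that re-registers itself a fixed number of times, so each loop() call in the benchmark spins for exactly ten iterations. A hypothetical sketch:

#include <folly/io/async/EventBase.h>

class CountedLoopCallback : public folly::EventBase::LoopCallback {
 public:
  CountedLoopCallback(folly::EventBase* eventBase, unsigned int count)
      : eventBase_(eventBase), count_(count) {}

  void runLoopCallback() noexcept override {
    --count_;
    if (count_ > 0) {
      // Re-register for the next loop iteration; once count_ hits zero the
      // loop has nothing left to run and loop() returns.
      eventBase_->runInLoop(this);
    }
  }

 private:
  folly::EventBase* eventBase_;
  unsigned int count_;
};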