This page collects typical usage examples of the C++ function testing::DoDefault. If you are unsure what testing::DoDefault does, how to call it, or what real-world usage looks like, the hand-picked code samples below may help; you can also explore further usage examples from the testing namespace in which it is defined.
Seven code examples of testing::DoDefault are shown below, ordered by popularity by default.
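Before the examples, here is a minimal, self-contained sketch of what testing::DoDefault actually does: used inside an EXPECT_CALL, it performs the default action of the mocked method, i.e. whatever was registered with ON_CALL (or Google Mock's built-in default if nothing was registered), instead of overriding it. The Calculator and MockCalculator classes below are hypothetical and exist only for illustration; they are not part of the examples that follow.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// Hypothetical interface, used only to demonstrate DoDefault.
class Calculator
{
public:
  virtual ~Calculator() {}
  virtual int add(int a, int b) = 0;
};

class MockCalculator : public Calculator
{
public:
  MOCK_METHOD2(add, int(int a, int b));
};

TEST(DoDefaultSketch, ExpectationKeepsDefaultAction)
{
  using testing::_;
  using testing::DoDefault;
  using testing::Return;

  MockCalculator mock;

  // ON_CALL registers the default action: add() returns 42.
  ON_CALL(mock, add(_, _))
    .WillByDefault(Return(42));

  // DoDefault tells the expectation to run that default action rather
  // than replacing it with a new one. Note that DoDefault may only be
  // used inside EXPECT_CALL, never inside ON_CALL itself.
  EXPECT_CALL(mock, add(_, _))
    .WillRepeatedly(DoDefault());

  EXPECT_EQ(42, mock.add(1, 2));
}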
Example 1: TestStore
TestStore(const hashmap<std::string, process::Shared<Rootfs>>& _rootfses)
  : rootfses(_rootfses)
{
  using testing::_;
  using testing::DoDefault;
  using testing::Invoke;

  ON_CALL(*this, recover())
    .WillByDefault(Invoke(this, &TestStore::unmocked_recover));
  EXPECT_CALL(*this, recover())
    .WillRepeatedly(DoDefault());

  ON_CALL(*this, get(_))
    .WillByDefault(Invoke(this, &TestStore::unmocked_get));
  EXPECT_CALL(*this, get(_))
    .WillRepeatedly(DoDefault());
}
Example 2: TestLauncher
TestLauncher(const process::Owned<slave::Launcher>& _real)
  : real(_real)
{
  using testing::_;
  using testing::DoDefault;

  ON_CALL(*this, recover(_))
    .WillByDefault(InvokeRecover(this));
  EXPECT_CALL(*this, recover(_))
    .WillRepeatedly(DoDefault());

  ON_CALL(*this, fork(_, _, _, _, _, _, _, _, _))
    .WillByDefault(InvokeFork(this));
  EXPECT_CALL(*this, fork(_, _, _, _, _, _, _, _, _))
    .WillRepeatedly(DoDefault());

  ON_CALL(*this, destroy(_))
    .WillByDefault(InvokeDestroy(this));
  EXPECT_CALL(*this, destroy(_))
    .WillRepeatedly(DoDefault());
}
Example 3: TestProvisioner
TestProvisioner(const process::Shared<Rootfs>& _rootfs)
  : rootfs(_rootfs)
{
  using testing::_;
  using testing::DoDefault;
  using testing::Invoke;

  ON_CALL(*this, recover(_, _))
    .WillByDefault(Invoke(this, &TestProvisioner::unmocked_recover));
  EXPECT_CALL(*this, recover(_, _))
    .WillRepeatedly(DoDefault());

  ON_CALL(*this, provision(_, _))
    .WillByDefault(Invoke(this, &TestProvisioner::unmocked_provision));
  EXPECT_CALL(*this, provision(_, _))
    .WillRepeatedly(DoDefault());

  ON_CALL(*this, destroy(_))
    .WillByDefault(Invoke(this, &TestProvisioner::unmocked_destroy));
  EXPECT_CALL(*this, destroy(_))
    .WillRepeatedly(DoDefault());
}
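Examples 1 through 3 all follow the same pattern: the test double registers its real (unmocked) implementation as the default action with ON_CALL(...).WillByDefault(Invoke(...)), and then adds a catch-all EXPECT_CALL(...).WillRepeatedly(DoDefault()) so that calls are both allowed and routed to that default. Below is a condensed, self-contained sketch of that pattern under assumed names; FileCache, TestFileCache, and unmocked_get are illustrative only and do not appear in the Mesos code above.

#include <gmock/gmock.h>

#include <string>

// Hypothetical interface (not from the examples above).
class FileCache
{
public:
  virtual ~FileCache() {}
  virtual std::string get(const std::string& key) = 0;
};

// Test double that delegates to a "real" implementation by default.
class TestFileCache : public FileCache
{
public:
  TestFileCache()
  {
    using testing::_;
    using testing::DoDefault;
    using testing::Invoke;

    // Make the real implementation the default action...
    ON_CALL(*this, get(_))
      .WillByDefault(Invoke(this, &TestFileCache::unmocked_get));

    // ...and accept any number of calls, each dispatched to that
    // default. Individual tests can still add stricter EXPECT_CALLs
    // later to override or verify specific interactions, since newer
    // expectations are matched first.
    EXPECT_CALL(*this, get(_))
      .WillRepeatedly(DoDefault());
  }

  MOCK_METHOD1(get, std::string(const std::string& key));

  // The "real" behavior used when no test overrides the mock.
  std::string unmocked_get(const std::string& key)
  {
    return "cached:" + key;
  }
};

In a test, constructing a TestFileCache and calling get() would then run unmocked_get() unless the test installs its own EXPECT_CALL with a different action; the constructor-installed expectation also keeps Google Mock from reporting "uninteresting mock function call" warnings for methods the test does not care about.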
Example 4: driver
// Checks that in a cluster with one slave and one framework, all of
// the slave's resources are offered to the framework.
TYPED_TEST(AllocatorTest, MockAllocator)
{
  EXPECT_CALL(this->allocator, initialize(_, _, _));

  Try<PID<Master> > master = this->StartMaster(&this->allocator);
  ASSERT_SOME(master);

  slave::Flags flags = this->CreateSlaveFlags();
  flags.resources = Option<string>("cpus:2;mem:1024;disk:0");

  EXPECT_CALL(this->allocator, slaveAdded(_, _, _));

  Try<PID<Slave> > slave = this->StartSlave(flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());

  EXPECT_CALL(this->allocator, frameworkAdded(_, _, _));

  EXPECT_CALL(sched, registered(_, _, _));

  // The framework should be offered all of the resources on the slave
  // since it is the only framework in the cluster.
  Future<Nothing> resourceOffers;
  EXPECT_CALL(sched, resourceOffers(_, OfferEq(2, 1024)))
    .WillOnce(FutureSatisfy(&resourceOffers));

  driver.start();

  AWAIT_READY(resourceOffers);

  // Shut everything down.
  EXPECT_CALL(this->allocator, resourcesRecovered(_, _, _))
    .WillRepeatedly(DoDefault());

  EXPECT_CALL(this->allocator, frameworkDeactivated(_))
    .Times(AtMost(1));
  EXPECT_CALL(this->allocator, frameworkRemoved(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();

  EXPECT_CALL(this->allocator, slaveRemoved(_))
    .Times(AtMost(1));

  this->Shutdown();
}
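The shutdown section of Example 4 (and of the remaining examples) combines two relaxations: WillRepeatedly(DoDefault()) allows any number of calls, each performing the default action, while Times(AtMost(1)) allows a call that may or may not happen during teardown. Here is a minimal, self-contained sketch of those two cardinalities, using a hypothetical Service/MockService pair rather than the Mesos allocator.

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// Hypothetical interface, for illustration only.
class Service
{
public:
  virtual ~Service() {}
  virtual void release(int id) = 0;
  virtual void shutdown() = 0;
};

class MockService : public Service
{
public:
  MOCK_METHOD1(release, void(int id));
  MOCK_METHOD0(shutdown, void());
};

TEST(TeardownSketch, RelaxedExpectations)
{
  using testing::_;
  using testing::AtMost;
  using testing::DoDefault;

  MockService service;

  // Zero or more release() calls are allowed; each one performs the
  // default action (for a void method, doing nothing).
  EXPECT_CALL(service, release(_))
    .WillRepeatedly(DoDefault());

  // shutdown() may be called at most once; it is fine if it never is.
  EXPECT_CALL(service, shutdown())
    .Times(AtMost(1));

  // Both expectations are satisfied whether or not these calls occur.
  service.release(7);
}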
Example 5: exec
//......... part of the code omitted here .........
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, stringify(this->url.get()));

  EXPECT_CALL(sched, registered(&driver, _, _));

  // The framework should be offered all of the resources on the slave
  // since it is the only framework running.
  EXPECT_CALL(sched, resourceOffers(&driver, OfferEq(2, 1024)))
    .WillOnce(LaunchTasks(1, 1, 500, "*"))
    .WillRepeatedly(DeclineOffers());

  EXPECT_CALL(exec, registered(_, _, _, _));

  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));

  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));

  driver.start();

  AWAIT_READY(status);
  EXPECT_EQ(TASK_RUNNING, status.get().state());

  // Stop the failing master from telling the slave to shut down when
  // it is killed.
  Future<ShutdownMessage> shutdownMessage =
    DROP_PROTOBUF(ShutdownMessage(), _, _);

  // Stop the framework from reregistering with the new master until the
  // slave has reregistered.
  DROP_PROTOBUFS(ReregisterFrameworkMessage(), _, _);

  // Shutting down the masters will cause the scheduler to get
  // disconnected.
  EXPECT_CALL(sched, disconnected(_));

  // Shutting down the masters will also cause the slave to shutdown
  // frameworks that are not checkpointing, thus causing the executor
  // to get shutdown.
  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  this->ShutdownMasters();

  AWAIT_READY(shutdownMessage);

  MockAllocatorProcess<TypeParam> allocator2;
  EXPECT_CALL(allocator2, initialize(_, _, _));

  Try<PID<Master> > master2 = this->StartMaster(&allocator2);
  ASSERT_SOME(master2);

  Future<Nothing> slaveAdded;
  EXPECT_CALL(allocator2, slaveAdded(_, _, _))
    .WillOnce(DoAll(InvokeSlaveAdded(&allocator2),
                    FutureSatisfy(&slaveAdded)));

  EXPECT_CALL(sched, reregistered(&driver, _));

  AWAIT_READY(slaveAdded);

  EXPECT_CALL(allocator2, frameworkAdded(_, _, _));

  Future<vector<Offer> > resourceOffers2;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&resourceOffers2));

  // We kill the filter so that ReregisterFrameworkMessages can get
  // to the master now that the framework has been added, ensuring
  // that the framework reregisters after the slave.
  process::filter(NULL);

  AWAIT_READY(resourceOffers2);

  // Since the task is still running on the slave, the framework
  // should only be offered the resources not being used by the task.
  EXPECT_THAT(resourceOffers2.get(), OfferEq(1, 524));

  // Shut everything down.
  EXPECT_CALL(allocator2, resourcesRecovered(_, _, _))
    .WillRepeatedly(DoDefault());

  EXPECT_CALL(allocator2, frameworkDeactivated(_))
    .Times(AtMost(1));
  EXPECT_CALL(allocator2, frameworkRemoved(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();

  EXPECT_CALL(allocator2, slaveRemoved(_))
    .Times(AtMost(1));

  this->Shutdown();
}
Example 6: exec
// Checks that if a slave is added after some allocations have already
// occurred, its resources are added to the available pool of
// resources and offered appropriately.
TYPED_TEST(AllocatorTest, SlaveAdded)
{
  EXPECT_CALL(this->allocator, initialize(_, _, _));

  master::Flags masterFlags = this->CreateMasterFlags();
  masterFlags.allocation_interval = Milliseconds(50);

  Try<PID<Master> > master = this->StartMaster(&this->allocator, masterFlags);
  ASSERT_SOME(master);

  MockExecutor exec(DEFAULT_EXECUTOR_ID);

  slave::Flags flags1 = this->CreateSlaveFlags();
  flags1.resources = Option<string>("cpus:3;mem:1024");

  EXPECT_CALL(this->allocator, slaveAdded(_, _, _));

  Try<PID<Slave> > slave1 = this->StartSlave(&exec, flags1);
  ASSERT_SOME(slave1);

  MockScheduler sched;
  MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());

  EXPECT_CALL(this->allocator, frameworkAdded(_, _, _));

  EXPECT_CALL(sched, registered(_, _, _));

  // We decline offers that we aren't expecting so that the resources
  // get aggregated. Note that we need to do this _first_ and
  // _separate_ from the expectation below so that this expectation is
  // checked last and matches all possible offers.
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillRepeatedly(DeclineOffers());

  // Initially, all of slave1's resources are available.
  EXPECT_CALL(sched, resourceOffers(_, OfferEq(3, 1024)))
    .WillOnce(LaunchTasks(1, 2, 512));

  // We filter the first time so that the unused resources
  // on slave1 from the task launch won't get reoffered
  // immediately and will get combined with slave2's
  // resources for a single offer.
  EXPECT_CALL(this->allocator, resourcesUnused(_, _, _, _))
    .WillOnce(InvokeUnusedWithFilters(&this->allocator, 0.1))
    .WillRepeatedly(InvokeUnusedWithFilters(&this->allocator, 0));

  EXPECT_CALL(exec, registered(_, _, _, _));

  Future<Nothing> launchTask;
  EXPECT_CALL(exec, launchTask(_, _))
    .WillOnce(DoAll(SendStatusUpdateFromTask(TASK_RUNNING),
                    FutureSatisfy(&launchTask)));

  EXPECT_CALL(sched, statusUpdate(_, _))
    .WillRepeatedly(DoDefault());

  driver.start();

  AWAIT_READY(launchTask);

  slave::Flags flags2 = this->CreateSlaveFlags();
  flags2.resources = Option<string>("cpus:4;mem:2048");

  EXPECT_CALL(this->allocator, slaveAdded(_, _, _));

  // After slave2 launches, all of its resources are combined with the
  // resources on slave1 that the task isn't using.
  Future<Nothing> resourceOffers;
  EXPECT_CALL(sched, resourceOffers(_, OfferEq(5, 2560)))
    .WillOnce(FutureSatisfy(&resourceOffers));

  Try<PID<Slave> > slave2 = this->StartSlave(flags2);
  ASSERT_SOME(slave2);

  AWAIT_READY(resourceOffers);

  // Shut everything down.
  EXPECT_CALL(this->allocator, resourcesRecovered(_, _, _))
    .WillRepeatedly(DoDefault());

  EXPECT_CALL(this->allocator, frameworkDeactivated(_))
    .Times(AtMost(1));
  EXPECT_CALL(this->allocator, frameworkRemoved(_))
    .Times(AtMost(1));

  EXPECT_CALL(exec, shutdown(_))
    .Times(AtMost(1));

  driver.stop();
  driver.join();

  EXPECT_CALL(this->allocator, slaveRemoved(_))
    .Times(AtMost(2));

  this->Shutdown();
}
Example 7: initialize
//......... part of the code omitted here .........
  slave::Flags flags4 = CreateSlaveFlags();
  flags4.resources = Option<string>("cpus:4;mem:4096;disk:0");

  EXPECT_CALL(allocator, slaveAdded(_, _, _));

  Future<vector<Offer> > offers4;
  EXPECT_CALL(sched3, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers4));

  Try<PID<Slave> > slave4 = StartSlave(flags4);
  ASSERT_SOME(slave4);

  // Total cluster resources now cpus=10, mem=7680.
  // user1 share = 0.2 (cpus=2, mem=1024)
  //   framework1 share = 1
  //   framework3 share = 0
  // user2 share = 0.4 (cpus=4, mem=2560)
  //   framework2 share = 1
  AWAIT_READY(offers4);

  // framework3 will be offered all of slave4's resources since user1
  // has the lowest user share, and framework3 has the lowest share of
  // user1's frameworks.
  EXPECT_THAT(offers4.get(), OfferEq(4, 4096));

  // user1 share = 0.67 (cpus=6, mem=5120)
  //   framework1 share = 0.33 (cpus=2, mem=1024)
  //   framework3 share = 0.8 (cpus=4, mem=4096)
  // user2 share = 0.4 (cpus=4, mem=2560)
  //   framework2 share = 1
  FrameworkInfo frameworkInfo4;
  frameworkInfo4.set_name("framework4");
  frameworkInfo4.set_user("user1");
  frameworkInfo4.set_role("role1");

  MockScheduler sched4;
  MesosSchedulerDriver driver4(&sched4, frameworkInfo4, master.get());

  Future<Nothing> frameworkAdded4;
  EXPECT_CALL(allocator, frameworkAdded(_, _, _))
    .WillOnce(DoAll(InvokeFrameworkAdded(&allocator),
                    FutureSatisfy(&frameworkAdded4)));

  EXPECT_CALL(sched4, registered(_, _, _));

  driver4.start();

  AWAIT_READY(frameworkAdded4);

  slave::Flags flags5 = CreateSlaveFlags();
  flags5.resources = Option<string>("cpus:1;mem:512;disk:0");

  EXPECT_CALL(allocator, slaveAdded(_, _, _));

  Future<vector<Offer> > offers5;
  EXPECT_CALL(sched2, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers5));

  Try<PID<Slave> > slave5 = StartSlave(flags5);
  ASSERT_SOME(slave5);

  // Total cluster resources now cpus=11, mem=8192.
  // user1 share = 0.63 (cpus=6, mem=5120)
  //   framework1 share = 0.33 (cpus=2, mem=1024)
  //   framework3 share = 0.8 (cpus=4, mem=4096)
  //   framework4 share = 0
  // user2 share = 0.36 (cpus=4, mem=2560)
  //   framework2 share = 1
  AWAIT_READY(offers5);

  // Even though framework4 doesn't have any resources, user2 has a
  // lower share than user1, so framework2 receives slave5's resources.
  EXPECT_THAT(offers5.get(), OfferEq(1, 512));

  // Shut everything down.
  EXPECT_CALL(allocator, resourcesRecovered(_, _, _))
    .WillRepeatedly(DoDefault());

  EXPECT_CALL(allocator, frameworkDeactivated(_))
    .Times(AtMost(4));
  EXPECT_CALL(allocator, frameworkRemoved(_))
    .Times(AtMost(4));

  driver1.stop();
  driver1.join();

  driver2.stop();
  driver2.join();

  driver3.stop();
  driver3.join();

  driver4.stop();
  driver4.join();

  EXPECT_CALL(allocator, slaveRemoved(_))
    .Times(AtMost(5));

  Shutdown();
}