This page collects typical usage examples of the C++ method testing::AtMost (a Google Mock cardinality). If you have been wondering what testing::AtMost does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples from the testing namespace that the method belongs to.
15 code examples of testing::AtMost are shown below, ordered by popularity by default.
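Before the examples, a quick orientation: AtMost(n) is a cardinality passed to Times(); it means the expected call may occur anywhere from zero to n times. The sketch below is a minimal, self-contained illustration and is not taken from the examples on this page; the Shutdownable interface and the mock class are hypothetical names used only for this demo. It shows why Times(AtMost(1)) is the idiom used throughout the Mesos tests below for callbacks that race with test teardown.

// Minimal sketch of testing::AtMost. Link against gmock_main (or provide a
// main() that calls InitGoogleMock). Names below are hypothetical.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::AtMost;

// Hypothetical interface whose shutdown hook may or may not fire,
// depending on how a test tears down.
class Shutdownable
{
public:
  virtual ~Shutdownable() {}
  virtual void shutdown() = 0;
};

class MockShutdownable : public Shutdownable
{
public:
  MOCK_METHOD0(shutdown, void());
};

TEST(AtMostExample, ShutdownMayRaceWithTeardown)
{
  MockShutdownable mock;

  // AtMost(1) is satisfied by zero or one call; a second call fails the
  // test. This is why the Mesos tests on this page use Times(AtMost(1))
  // for callbacks such as shutdown() or offerRescinded() that may or may
  // not fire, depending on how the cluster teardown races.
  EXPECT_CALL(mock, shutdown())
    .Times(AtMost(1));

  mock.shutdown(); // One call satisfies the expectation; zero calls would too.
}

Compare this with .Times(1) in Example 1 (EXPECT_CALL(allocator, initialize(_, _, _))), which demands exactly one call, and with .Times(AtMost(1)) on frameworkDeactivated/frameworkRemoved in the same example, which tolerates the shutdown race.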
Example 1: driver
TEST_F(ResourceOffersTest, Request)
{
MockAllocatorProcess<HierarchicalDRFAllocatorProcess> allocator;
EXPECT_CALL(allocator, initialize(_, _, _))
.Times(1);
Try<PID<Master> > master = StartMaster(&allocator);
ASSERT_SOME(master);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(allocator, frameworkAdded(_, _, _))
.Times(1);
Future<Nothing> registered;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureSatisfy(&registered));
driver.start();
AWAIT_READY(registered);
vector<Request> sent;
Request request;
request.mutable_slave_id()->set_value("test");
sent.push_back(request);
Future<vector<Request> > received;
EXPECT_CALL(allocator, resourcesRequested(_, _))
.WillOnce(FutureArg<1>(&received));
driver.requestResources(sent);
AWAIT_READY(received);
EXPECT_EQ(sent.size(), received.get().size());
EXPECT_NE(0u, received.get().size());
EXPECT_EQ(request.slave_id(), received.get()[0].slave_id());
EXPECT_CALL(allocator, frameworkDeactivated(_))
.Times(AtMost(1)); // Races with shutting down the cluster.
EXPECT_CALL(allocator, frameworkRemoved(_))
.Times(AtMost(1)); // Races with shutting down the cluster.
driver.stop();
driver.join();
Shutdown();
}
Example 2: driver
// Checks that in a cluster with one slave and one framework, all of
// the slave's resources are offered to the framework.
TYPED_TEST(AllocatorTest, MockAllocator)
{
EXPECT_CALL(this->allocator, initialize(_, _, _));
Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:2;mem:1024;disk:0");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
Try<PID<Slave> > slave = this->StartSlave(flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(this->allocator, frameworkAdded(_, _, _));
EXPECT_CALL(sched, registered(_, _, _));
// The framework should be offered all of the resources on the slave
// since it is the only framework in the cluster.
Future<Nothing> resourceOffers;
EXPECT_CALL(sched, resourceOffers(_, OfferEq(2, 1024)))
.WillOnce(FutureSatisfy(&resourceOffers));
driver.start();
AWAIT_READY(resourceOffers);
// Shut everything down.
EXPECT_CALL(this->allocator, resourcesRecovered(_, _, _))
.WillRepeatedly(DoDefault());
EXPECT_CALL(this->allocator, frameworkDeactivated(_))
.Times(AtMost(1));
EXPECT_CALL(this->allocator, frameworkRemoved(_))
.Times(AtMost(1));
driver.stop();
driver.join();
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
this->Shutdown();
}
Example 3: driver
TEST_F(ExceptionTest, DisallowSchedulerActionsOnAbort)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Try<PID<Slave> > slave = StartSlave();
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
Future<Nothing> registered;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureSatisfy(&registered));
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillRepeatedly(Return());
driver.start();
AWAIT_READY(registered);
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
ASSERT_EQ(DRIVER_ABORTED, driver.abort());
ASSERT_EQ(DRIVER_ABORTED, driver.reviveOffers());
driver.stop();
Shutdown();
}
Example 4: exec
// This test checks that a scheduler exit shuts down the executor.
TEST_F(FaultToleranceTest, SchedulerExit)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
Try<PID<Slave> > slave = StartSlave(&exec);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
AWAIT_READY(offers);
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
vector<TaskInfo> tasks;
tasks.push_back(task);
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
EXPECT_CALL(exec, registered(_, _, _, _));
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown();
}
Example 5: registered
TEST(ResourceOffersTest, ResourcesGetReofferedWhenUnused)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);
MockScheduler sched1;
MesosSchedulerDriver driver1(&sched1, "", DEFAULT_EXECUTOR_INFO, master);
vector<Offer> offers;
trigger sched1ResourceOfferCall;
EXPECT_CALL(sched1, registered(&driver1, _))
.Times(1);
EXPECT_CALL(sched1, resourceOffers(&driver1, _))
.WillOnce(DoAll(SaveArg<1>(&offers),
Trigger(&sched1ResourceOfferCall)))
.WillRepeatedly(Return());
driver1.start();
WAIT_UNTIL(sched1ResourceOfferCall);
EXPECT_NE(0, offers.size());
vector<TaskDescription> tasks; // Use nothing!
driver1.launchTasks(offers[0].id(), tasks);
driver1.stop();
driver1.join();
MockScheduler sched2;
MesosSchedulerDriver driver2(&sched2, "", DEFAULT_EXECUTOR_INFO, master);
trigger sched2ResourceOfferCall;
EXPECT_CALL(sched2, registered(&driver2, _))
.Times(1);
EXPECT_CALL(sched2, resourceOffers(&driver2, _))
.WillOnce(Trigger(&sched2ResourceOfferCall))
.WillRepeatedly(Return());
EXPECT_CALL(sched2, offerRescinded(&driver2, _))
.Times(AtMost(1));
driver2.start();
WAIT_UNTIL(sched2ResourceOfferCall);
driver2.stop();
driver2.join();
local::shutdown();
}
Example 6: driver
TEST(FaultToleranceTest, FrameworkReregister)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
MockFilter filter;
process::filter(&filter);
EXPECT_MESSAGE(filter, _, _, _)
.WillRepeatedly(Return(false));
PID<Master> master = local::launch(1, 2, 1 * Gigabyte, false);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
trigger schedRegisteredCall, schedReregisteredCall;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(Trigger(&schedRegisteredCall));
EXPECT_CALL(sched, reregistered(&driver, _))
.WillOnce(Trigger(&schedReregisteredCall));
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillRepeatedly(Return());
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
process::Message message;
EXPECT_MESSAGE(filter, Eq(FrameworkRegisteredMessage().GetTypeName()), _, _)
.WillOnce(DoAll(SaveArgField<0>(&process::MessageEvent::message, &message),
Return(false)));
driver.start();
WAIT_UNTIL(schedRegisteredCall); // Ensures registered message is received.
// Simulate a spurious newMasterDetected event (e.g., due to ZooKeeper
// expiration) at the scheduler.
NewMasterDetectedMessage newMasterDetectedMsg;
newMasterDetectedMsg.set_pid(master);
process::post(message.to, newMasterDetectedMsg);
WAIT_UNTIL(schedReregisteredCall);
driver.stop();
driver.join();
local::shutdown();
process::filter(NULL);
}
Example 7: driver
TEST_F(FaultToleranceTest, FrameworkReregister)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Try<PID<Slave> > slave = StartSlave();
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
Future<Nothing> registered;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureSatisfy(&registered));
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillRepeatedly(Return());
Future<process::Message> message =
FUTURE_MESSAGE(Eq(FrameworkRegisteredMessage().GetTypeName()), _, _);
driver.start();
AWAIT_READY(message); // Framework registered message, to get the pid.
AWAIT_READY(registered); // Framework registered call.
Future<Nothing> disconnected;
EXPECT_CALL(sched, disconnected(&driver))
.WillOnce(FutureSatisfy(&disconnected));
Future<Nothing> reregistered;
EXPECT_CALL(sched, reregistered(&driver, _))
.WillOnce(FutureSatisfy(&reregistered));
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
// Simulate a spurious newMasterDetected event (e.g., due to ZooKeeper
// expiration) at the scheduler.
NewMasterDetectedMessage newMasterDetectedMsg;
newMasterDetectedMsg.set_pid(master.get());
process::post(message.get().to, newMasterDetectedMsg);
AWAIT_READY(disconnected);
AWAIT_READY(reregistered);
driver.stop();
driver.join();
Shutdown();
}
Example 8: driver
TEST(MasterTest, ResourceOfferWithMultipleSlaves)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
PID<Master> master = local::launch(10, 2, 1 * Gigabyte, false, false);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, master);
vector<SlaveOffer> offers;
trigger resourceOfferCall;
EXPECT_CALL(sched, getFrameworkName(&driver))
.WillOnce(Return(""));
EXPECT_CALL(sched, getExecutorInfo(&driver))
.WillOnce(Return(DEFAULT_EXECUTOR_INFO));
EXPECT_CALL(sched, registered(&driver, _))
.Times(1);
EXPECT_CALL(sched, resourceOffer(&driver, _, _))
.WillOnce(DoAll(SaveArg<2>(&offers), Trigger(&resourceOfferCall)))
.WillRepeatedly(Return());
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
driver.start();
WAIT_UNTIL(resourceOfferCall);
EXPECT_NE(0, offers.size());
EXPECT_GE(10, offers.size());
Resources resources(offers[0].resources());
EXPECT_EQ(2, resources.get("cpus", Resource::Scalar()).value());
EXPECT_EQ(1024, resources.get("mem", Resource::Scalar()).value());
driver.stop();
driver.join();
local::shutdown();
}
Example 9: exec
// This test verifies that an authorized task launch is successful.
TEST_F(MasterAuthorizationTest, AuthorizedTask)
{
// Setup ACLs so that the framework can launch tasks as "foo".
ACLs acls;
mesos::ACL::RunTasks* acl = acls.add_run_tasks();
acl->mutable_principals()->add_values(DEFAULT_FRAMEWORK_INFO.principal());
acl->mutable_users()->add_values("foo");
master::Flags flags = CreateMasterFlags();
flags.acls = acls;
Try<PID<Master> > master = StartMaster(flags);
ASSERT_SOME(master);
// Create an authorized executor.
ExecutorInfo executor; // Bug in gcc 4.1.*, must assign on next line.
executor = CREATE_EXECUTOR_INFO("test-executor", "exit 1");
executor.mutable_command()->set_user("foo");
MockExecutor exec(executor.executor_id());
Try<PID<Slave> > slave = StartSlave(&exec);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _))
.Times(1);
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
// Create an authorized task.
TaskInfo task;
task.set_name("test");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
task.mutable_executor()->MergeFrom(executor);
vector<TaskInfo> tasks;
tasks.push_back(task);
EXPECT_CALL(exec, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
Example 10: driver
//......... part of the code is omitted here .........
Try<PID<Slave> > slave1 = StartSlave(&exec1);
ASSERT_SOME(slave1);
AWAIT_READY(offers1);
EXPECT_NE(0u, offers1.get().size());
// Launch the first task with the default executor id.
ExecutorInfo executor1;
executor1 = DEFAULT_EXECUTOR_INFO;
executor1.mutable_command()->set_value("exit 1");
TaskInfo task1 = createTask(
offers1.get()[0], executor1.command().value(), executor1.executor_id());
vector<TaskInfo> tasks1;
tasks1.push_back(task1);
// Return a pending future from authorizer.
Future<Nothing> future;
Promise<bool> promise;
EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::RunTasks&>()))
.WillOnce(DoAll(FutureSatisfy(&future),
Return(promise.future())));
driver.launchTasks(offers1.get()[0].id(), tasks1);
// Wait until authorization is in progress.
AWAIT_READY(future);
Future<vector<Offer> > offers2;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers2))
.WillRepeatedly(Return()); // Ignore subsequent offers.
// Now start the second slave.
MockExecutor exec2(DEFAULT_EXECUTOR_ID);
Try<PID<Slave> > slave2 = StartSlave(&exec2);
ASSERT_SOME(slave2);
AWAIT_READY(offers2);
EXPECT_NE(0u, offers2.get().size());
// Now launch the second task with the same executor id but
// a different executor command.
ExecutorInfo executor2;
executor2 = executor1;
executor2.mutable_command()->set_value("exit 2");
TaskInfo task2 = createTask(
offers2.get()[0], executor2.command().value(), executor2.executor_id());
vector<TaskInfo> tasks2;
tasks2.push_back(task2);
EXPECT_CALL(exec2, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec2, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status2;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status2));
EXPECT_CALL(authorizer, authorize(An<const mesos::ACL::RunTasks&>()))
.WillOnce(Return(true));
driver.launchTasks(offers2.get()[0].id(), tasks2);
AWAIT_READY(status2);
ASSERT_EQ(TASK_RUNNING, status2.get().state());
EXPECT_CALL(exec1, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec1, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status1;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status1));
// Complete authorization of 'task1'.
promise.set(true);
AWAIT_READY(status1);
ASSERT_EQ(TASK_RUNNING, status1.get().state());
EXPECT_CALL(exec1, shutdown(_))
.Times(AtMost(1));
EXPECT_CALL(exec2, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown();
}
Example 11: exec
// This test verifies that when an executor terminates before
// registering with the slave, it is properly cleaned up.
TEST_F(SlaveTest, RemoveUnregisteredTerminatedExecutor)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
TestContainerizer containerizer(&exec);
Try<PID<Slave> > slave = StartSlave(&containerizer);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _))
.Times(1);
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
vector<TaskInfo> tasks;
tasks.push_back(task);
// Drop the registration message from the executor to the slave.
Future<process::Message> registerExecutorMessage =
DROP_MESSAGE(Eq(RegisterExecutorMessage().GetTypeName()), _, _);
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(registerExecutorMessage);
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
Future<Nothing> schedule =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
// Now kill the executor.
containerizer.destroy(offers.get()[0].framework_id(), DEFAULT_EXECUTOR_ID);
AWAIT_READY(status);
EXPECT_EQ(TASK_LOST, status.get().state());
// We use 'gc.schedule' as a signal for the executor being cleaned
// up by the slave.
AWAIT_READY(schedule);
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown(); // Must shutdown before 'containerizer' gets deallocated.
}
Example 12: driver
// This test checks that a scheduler gets a slave lost
// message for a partitioned slave.
TEST_F(PartitionTest, PartitionedSlave)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
// Set these expectations up before we spawn the slave so that we
// don't miss the first PING.
Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _);
// Drop all the PONGs to simulate slave partition.
DROP_MESSAGES(Eq("PONG"), _, _);
Try<PID<Slave> > slave = StartSlave();
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<Nothing> resourceOffers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureSatisfy(&resourceOffers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
// Need to make sure the framework AND slave have registered with
// the master. Waiting for resource offers should accomplish both.
AWAIT_READY(resourceOffers);
Clock::pause();
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
Future<Nothing> slaveLost;
EXPECT_CALL(sched, slaveLost(&driver, _))
.WillOnce(FutureSatisfy(&slaveLost));
// Now advance through the PINGs.
uint32_t pings = 0;
while (true) {
AWAIT_READY(ping);
pings++;
if (pings == master::MAX_SLAVE_PING_TIMEOUTS) {
break;
}
ping = FUTURE_MESSAGE(Eq("PING"), _, _);
Clock::advance(master::SLAVE_PING_TIMEOUT);
}
Clock::advance(master::SLAVE_PING_TIMEOUT);
AWAIT_READY(slaveLost);
driver.stop();
driver.join();
Shutdown();
Clock::resume();
}
Example 13: exec
TEST_F(GarbageCollectorIntegrationTest, DiskUsage)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
TestContainerizer containerizer(&exec);
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
slave::Flags flags = CreateSlaveFlags();
Try<PID<Slave> > slave = StartSlave(&containerizer, flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
SlaveID slaveId = slaveRegisteredMessage.get().slave_id();
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Resources resources = Resources::parse(flags.resources.get()).get();
double cpus = resources.get<Value::Scalar>("cpus").get().value();
double mem = resources.get<Value::Scalar>("mem").get().value();
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(LaunchTasks(DEFAULT_EXECUTOR_INFO, 1, cpus, mem, "*"))
.WillRepeatedly(Return()); // Ignore subsequent offers.
EXPECT_CALL(exec, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(_, _))
.WillOnce(FutureArg<1>(&status));
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
const std::string& executorDir = slave::paths::getExecutorPath(
flags.work_dir, slaveId, frameworkId.get(), DEFAULT_EXECUTOR_ID);
ASSERT_TRUE(os::exists(executorDir));
Clock::pause();
// Killing the executor will cause the slave to schedule its
// directory to get garbage collected.
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
Future<Nothing> schedule =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
EXPECT_CALL(sched, statusUpdate(_, _))
.Times(AtMost(1)); // Ignore TASK_LOST from killed executor.
// Kill the executor and inform the slave.
containerizer.destroy(frameworkId.get(), DEFAULT_EXECUTOR_ID);
AWAIT_READY(schedule);
Clock::settle(); // Wait for GarbageCollectorProcess::schedule to complete.
// We advance the clock here so that the 'removalTime' of the
// executor directory is definitely less than 'flags.gc_delay' by
// the time 'GarbageCollectorProcess::prune()' gets called (below).
// Otherwise, due to double comparison precision in 'prune()', the
// directory might not be deleted.
Clock::advance(Seconds(1));
Future<Nothing> _checkDiskUsage =
FUTURE_DISPATCH(_, &Slave::_checkDiskUsage);
// Simulate a disk full message to the slave.
process::dispatch(
slave.get(),
&Slave::_checkDiskUsage,
Try<double>(1.0 - slave::GC_DISK_HEADROOM));
AWAIT_READY(_checkDiskUsage);
Clock::settle(); // Wait for Slave::_checkDiskUsage to complete.
// Executor's directory should be gc'ed by now.
ASSERT_FALSE(os::exists(executorDir));
//......... part of the code is omitted here .........
Example 14: driver
// This test launches a command task with checkpointing enabled,
// terminates the agent while the task is running, restarts the
// agent, kills the task, and then verifies that TASK_KILLED is
// received for the task.
TEST_F(CniIsolatorTest, ROOT_SlaveRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "network/cni";
flags.network_cni_plugins_dir = cniPluginDir;
flags.network_cni_config_dir = cniConfigDir;
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_EQ(1u, offers->size());
const Offer& offer = offers.get()[0];
CommandInfo command;
command.set_value("sleep 1000");
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128").get(),
command);
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
// Make sure the container joins the mock CNI network.
container->add_network_infos()->set_name("__MESOS_TEST__");
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusKilled;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusKilled));
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(AtMost(1));
Future<Nothing> ack =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
driver.launchTasks(offer.id(), {task});
AWAIT_READY(statusRunning);
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
// Wait for the ACK to be checkpointed.
AWAIT_READY(ack);
// Stop the slave after TASK_RUNNING is received.
slave.get()->terminate();
// Restart the slave.
slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
// Kill the task.
driver.killTask(task.task_id());
AWAIT_READY(statusKilled);
EXPECT_EQ(task.task_id(), statusKilled->task_id());
EXPECT_EQ(TASK_KILLED, statusKilled->state());
driver.stop();
driver.join();
}
Example 15: exec
TEST_F(StatusUpdateManagerTest, CheckpointStatusUpdate)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
slave::Flags flags = CreateSlaveFlags();
flags.checkpoint = true;
Try<PID<Slave> > slave = StartSlave(&exec, flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo; // Bug in gcc 4.1.*, must assign on next line.
frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true); // Enable checkpointing.
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _))
.Times(1);
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
EXPECT_CALL(exec, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(_, _))
.WillOnce(FutureArg<1>(&status));
Future<Nothing> _statusUpdateAcknowledgement =
FUTURE_DISPATCH(slave.get(), &Slave::_statusUpdateAcknowledgement);
driver.launchTasks(offers.get()[0].id(), createTasks(offers.get()[0]));
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
AWAIT_READY(_statusUpdateAcknowledgement);
// Ensure that both the status update and its acknowledgement are
// correctly checkpointed.
Try<list<string> > found = os::find(flags.work_dir, TASK_UPDATES_FILE);
ASSERT_SOME(found);
ASSERT_EQ(1u, found.get().size());
Try<int> fd = os::open(found.get().front(), O_RDONLY);
ASSERT_SOME(fd);
int updates = 0;
int acks = 0;
string uuid;
Result<StatusUpdateRecord> record = None();
while (true) {
record = ::protobuf::read<StatusUpdateRecord>(fd.get());
ASSERT_FALSE(record.isError());
if (record.isNone()) { // Reached EOF.
break;
}
if (record.get().type() == StatusUpdateRecord::UPDATE) {
EXPECT_EQ(TASK_RUNNING, record.get().update().status().state());
uuid = record.get().update().uuid();
updates++;
} else {
EXPECT_EQ(uuid, record.get().uuid());
acks++;
}
}
ASSERT_EQ(1, updates);
ASSERT_EQ(1, acks);
close(fd.get());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown();
}