This page collects typical usage examples of the C++ method Future::state, extracted from open-source projects. If you have been wondering how Future::state is used in practice, the curated examples below should help; they also serve as a further illustration of the Future class the method belongs to.
A total of 15 code examples of Future::state are shown below, ordered by popularity.
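Nearly every example below follows the same pattern: the test holds a process::Future<TaskStatus>, waits on it with AWAIT_READY, and then calls TaskStatus::state() through the Future's operator->. The following minimal sketch shows that mechanism in isolation; it is a toy, not Mesos code — the FakeStatus type is a stand-in for mesos::TaskStatus (an assumption), and only libprocess from the Mesos source tree is required.
#include <iostream>
#include <process/future.hpp>
// Stand-in for mesos::TaskStatus (assumption; the real protobuf has many
// more fields). Only the state() accessor matters for this sketch.
struct FakeStatus
{
  int state() const { return 1; } // e.g. TASK_RUNNING
};
int main()
{
  process::Promise<FakeStatus> promise;
  process::Future<FakeStatus> status = promise.future();
  promise.set(FakeStatus()); // Simulate a status update arriving.
  if (status.isReady()) {
    // Future::operator-> exposes the contained value, so the
    // `status->state()` calls in the examples below are really
    // TaskStatus::state() invoked on the ready result.
    std::cout << "state = " << status->state() << std::endl;
  }
  return 0;
}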
Example 1: driver
// This test confirms that setting no values for the soft and hard
// limits implies an unlimited resource.
TEST_F(PosixRLimitsIsolatorTest, UnsetLimits) {
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "posix/rlimits";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_NE(0u, offers->size());
TaskInfo task = createTask(
offers.get()[0].slave_id(),
offers.get()[0].resources(),
"exit `ulimit -c | grep -q unlimited`");
// Force usage of C locale as we interpret a potentially translated
// string in the task's command.
mesos::Environment::Variable* locale =
task.mutable_command()->mutable_environment()->add_variables();
locale->set_name("LC_ALL");
locale->set_value("C");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
// Setting the core rlimit without a soft or hard limit signifies an
// unlimited range.
RLimitInfo rlimitInfo;
RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
rlimit->set_type(RLimitInfo::RLimit::RLMT_CORE);
container->mutable_rlimit_info()->CopyFrom(rlimitInfo);
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinal;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinal));
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(statusRunning);
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinal);
EXPECT_EQ(task.task_id(), statusFinal->task_id());
EXPECT_EQ(TASK_FINISHED, statusFinal->state());
driver.stop();
driver.join();
}
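For reference, the shell check in the task above (`ulimit -c | grep -q unlimited`) has a direct C++ equivalent. The standalone POSIX sketch below (no Mesos required; plain getrlimit) shows what an "unlimited" core limit looks like to a process:
#include <sys/resource.h>
#include <cstdio>
// Print whether the soft/hard core-dump limits are unlimited, mirroring
// the `ulimit -c | grep -q unlimited` check in the task command above.
int main()
{
  struct rlimit rl;
  if (getrlimit(RLIMIT_CORE, &rl) != 0) {
    perror("getrlimit");
    return 1;
  }
  std::printf(
      "core soft=%s hard=%s\n",
      rl.rlim_cur == RLIM_INFINITY ? "unlimited" : "finite",
      rl.rlim_max == RLIM_INFINITY ? "unlimited" : "finite");
  return 0;
}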
Example 2: driver
// In this test, the agent initially doesn't enable disk isolation
// but then restarts with XFS disk isolation enabled. We verify that
// the old container launched before the agent restart is
// successfully recovered.
TEST_F(ROOT_XFS_QuotaTest, RecoverOldContainers)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
slave::Flags flags = CreateSlaveFlags();
// `CreateSlaveFlags()` enables `disk/xfs` so here we reset
// `isolation` to empty.
flags.isolation.clear();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:1").get(),
"dd if=/dev/zero of=file bs=1024 count=1; sleep 1000");
Future<TaskStatus> startingStatus;
Future<TaskStatus> runningStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus));
driver.launchTasks(offer.id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
{
Future<ResourceUsage> usage =
process::dispatch(slave.get()->pid, &Slave::usage);
AWAIT_READY(usage);
// We should have one executor using resources, but it should not
// have a disk limit enabled.
ASSERT_EQ(1, usage->executors().size());
const ResourceUsage_Executor& executor = usage->executors().Get(0);
ASSERT_TRUE(executor.has_statistics());
ASSERT_FALSE(executor.statistics().has_disk_limit_bytes());
}
// Restart the slave.
slave.get()->terminate();
Future<SlaveReregisteredMessage> slaveReregisteredMessage =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
// This time use the agent flags that include XFS disk isolation.
slave = StartSlave(detector.get(), CreateSlaveFlags());
ASSERT_SOME(slave);
// Wait for the slave to re-register.
AWAIT_READY(slaveReregisteredMessage);
{
Future<ResourceUsage> usage =
process::dispatch(slave.get()->pid, &Slave::usage);
AWAIT_READY(usage);
// We should still have one executor using resources, but it should
// not have a disk limit enabled.
ASSERT_EQ(1, usage->executors().size());
const ResourceUsage_Executor& executor = usage->executors().Get(0);
ASSERT_TRUE(executor.has_statistics());
ASSERT_FALSE(executor.statistics().has_disk_limit_bytes());
}
//......... part of the code omitted here .........
Example 3: driver
// This test ensures that the command executor sends TASK_KILLING
// to frameworks that support the capability.
TEST_P(CommandExecutorTest, TaskKillingCapability)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
slave::Flags flags = CreateSlaveFlags();
flags.http_command_executor = GetParam();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
// Start the framework with the task killing capability.
FrameworkInfo::Capability capability;
capability.set_type(FrameworkInfo::Capability::TASK_KILLING_STATE);
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.add_capabilities()->CopyFrom(capability);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_EQ(1u, offers->size());
// Launch a task with the command executor.
TaskInfo task = createTask(
offers->front().slave_id(),
offers->front().resources(),
"sleep 1000");
Future<TaskStatus> statusRunning;
EXPECT_CALL(sched, statusUpdate(_, _))
.WillOnce(FutureArg<1>(&statusRunning));
driver.launchTasks(offers->front().id(), {task});
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
Future<TaskStatus> statusKilling, statusKilled;
EXPECT_CALL(sched, statusUpdate(_, _))
.WillOnce(FutureArg<1>(&statusKilling))
.WillOnce(FutureArg<1>(&statusKilled));
driver.killTask(task.task_id());
AWAIT_READY(statusKilling);
EXPECT_EQ(TASK_KILLING, statusKilling->state());
AWAIT_READY(statusKilled);
EXPECT_EQ(TASK_KILLED, statusKilled->state());
driver.stop();
driver.join();
}
Example 4: fetcher
// Verify that we can get accurate resource statistics from the XFS
// disk isolator.
TEST_F(ROOT_XFS_QuotaTest, ResourceStatistics)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
Fetcher fetcher(flags);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Create a task that tries to write 4MB to its 3MB disk allocation but
// doesn't fail. We will verify that the allocated disk is filled.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:3").get(),
"dd if=/dev/zero of=file bs=1048576 count=4 || sleep 1000");
Future<TaskStatus> startingStatus;
Future<TaskStatus> runningStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
Future<hashset<ContainerID>> containers = containerizer.get()->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *(containers->begin());
Timeout timeout = Timeout::in(Seconds(5));
while (true) {
Future<ResourceStatistics> usage = containerizer.get()->usage(containerId);
AWAIT_READY(usage);
ASSERT_TRUE(usage->has_disk_limit_bytes());
EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));
if (usage->has_disk_used_bytes()) {
// Usage must always be <= the limit.
EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes());
// Usage may still be below the limit, but once it reaches the
// limit it must equal it exactly and never exceed it.
if (usage->disk_used_bytes() >= usage->disk_limit_bytes()) {
EXPECT_EQ(
usage->disk_used_bytes(), usage->disk_limit_bytes());
EXPECT_EQ(Megabytes(3), Bytes(usage->disk_used_bytes()));
break;
}
}
ASSERT_FALSE(timeout.expired());
os::sleep(Milliseconds(100));
}
driver.stop();
driver.join();
}
Example 5: driver
// This test verifies that a docker image's default entrypoint is executed
// correctly when using the registry puller. This corresponds to the case in
// the runtime isolator logic table: sh=0, value=0, argv=1, entrypoint=1, cmd=0.
TEST_F(DockerRuntimeIsolatorTest,
ROOT_CURL_INTERNET_DockerDefaultEntryptRegistryPuller)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "docker/runtime,filesystem/linux";
flags.image_providers = "docker";
flags.docker_registry = "https://registry-1.docker.io";
flags.docker_store_dir = path::join(os::getcwd(), "store");
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_EQ(1u, offers->size());
const Offer& offer = offers.get()[0];
TaskInfo task;
task.set_name("test-task");
task.mutable_task_id()->set_value(UUID::random().toString());
task.mutable_slave_id()->CopyFrom(offer.slave_id());
task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
task.mutable_command()->set_shell(false);
task.mutable_command()->add_arguments("hello world");
Image image;
image.set_type(Image::DOCKER);
// The 'mesosphere/inky' image, also used in the docker containerizer
// tests, defines 'echo' as its entrypoint and leaves cmd unset.
image.mutable_docker()->set_name("mesosphere/inky");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
container->mutable_mesos()->mutable_image()->CopyFrom(image);
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
driver.launchTasks(offer.id(), {task});
AWAIT_READY_FOR(statusRunning, Seconds(60));
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(task.task_id(), statusFinished->task_id());
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
driver.stop();
driver.join();
}
Example 6: exec
//......... part of the code omitted here .........
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
// Since the agent may have retried registration, we want to
// ensure that any duplicate registrations are flushed before
// we appoint the master again. Otherwise, the agent may
// receive a stale registration message.
Clock::pause();
Clock::settle();
Clock::resume();
// Trigger a re-registration of the slave and capture the message
// so that we can spoof a race with a launch task message.
DROP_PROTOBUFS(ReregisterSlaveMessage(), slave.get()->pid, master.get()->pid);
Future<ReregisterSlaveMessage> reregisterSlaveMessage =
DROP_PROTOBUF(
ReregisterSlaveMessage(),
slave.get()->pid,
master.get()->pid);
detector.appoint(master.get()->pid);
AWAIT_READY(reregisterSlaveMessage);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
TaskInfo task;
task.set_name("test task");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
ExecutorDriver* executorDriver;
EXPECT_CALL(exec, registered(_, _, _, _))
.WillOnce(SaveArg<0>(&executorDriver));
// Leave the task in TASK_STAGING.
Future<Nothing> launchTask;
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(FutureSatisfy(&launchTask));
EXPECT_CALL(sched, statusUpdate(&driver, _))
.Times(0);
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(launchTask);
// Send the stale re-registration message, which does not contain
// the task we just launched. This will trigger a reconciliation
// by the master.
Future<SlaveReregisteredMessage> slaveReregisteredMessage =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
// Prevent this from being dropped per the DROP_PROTOBUFS above.
FUTURE_PROTOBUF(
ReregisterSlaveMessage(),
slave.get()->pid,
master.get()->pid);
process::post(
slave.get()->pid,
master.get()->pid,
reregisterSlaveMessage.get());
AWAIT_READY(slaveReregisteredMessage);
// Neither the master nor the slave should send a TASK_LOST
// as part of the reconciliation. We check this by calling
// Clock::settle() to flush all pending events.
Clock::pause();
Clock::settle();
Clock::resume();
// Now send TASK_FINISHED and make sure it's the only message
// received by the scheduler.
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
TaskStatus taskStatus;
taskStatus.mutable_task_id()->CopyFrom(task.task_id());
taskStatus.set_state(TASK_FINISHED);
executorDriver->sendStatusUpdate(taskStatus);
AWAIT_READY(status);
ASSERT_EQ(TASK_FINISHED, status->state());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
}
Example 7: driver
// This test ensures that when explicit acknowledgements are enabled,
// acknowledgements for master-generated updates are dropped by the
// driver. We test this by creating an invalid task that uses no
// resources.
TEST_F(MesosSchedulerDriverTest, ExplicitAcknowledgementsMasterGeneratedUpdate)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get());
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
false,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
// Ensure no status update acknowledgements are sent to the master.
EXPECT_NO_FUTURE_CALLS(
mesos::scheduler::Call(),
mesos::scheduler::Call::ACKNOWLEDGE,
_,
master.get()->pid);
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers->size());
// Launch a task using no resources.
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
vector<TaskInfo> tasks;
tasks.push_back(task);
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(status);
ASSERT_EQ(TASK_ERROR, status->state());
ASSERT_EQ(TaskStatus::SOURCE_MASTER, status->source());
ASSERT_EQ(TaskStatus::REASON_TASK_INVALID, status->reason());
// Now send the acknowledgement.
driver.acknowledgeStatusUpdate(status.get());
// Settle the clock to ensure driver processes the acknowledgement,
// which should get dropped due to having come from the master.
Clock::pause();
Clock::settle();
driver.stop();
driver.join();
}
Example 8: fetcher
TEST_P(MemoryIsolatorTest, ROOT_MemUsage)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = GetParam();
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(
detector.get(),
containerizer.get());
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
TaskInfo task = createTask(offers.get()[0], "sleep 120");
Future<TaskStatus> statusRunning;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning));
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *(containers->begin());
Future<ResourceStatistics> usage = containerizer->usage(containerId);
AWAIT_READY(usage);
// TODO(jieyu): Consider using a program that predictably increases
// RSS so that we can set more meaningful expectation here.
EXPECT_LT(0u, usage->mem_rss_bytes());
driver.stop();
driver.join();
}
Example 9: driver
// Test that memory pressure listening is restarted after recovery.
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about the memory cgroup for this test.
flags.isolation = "cgroups/mem";
flags.agent_subsystems = None();
Fetcher fetcher;
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> running;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&running));
Future<Nothing> _statusUpdateAcknowledgement =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running.get().task_id());
EXPECT_EQ(TASK_RUNNING, running.get().state());
// Wait for the ACK to be checkpointed.
AWAIT_READY(_statusUpdateAcknowledgement);
// We restart the slave to let it recover.
slave.get()->terminate();
// Set up so we can wait until the new slave updates the container's
// resources (this occurs after the executor has re-registered).
Future<Nothing> update =
FUTURE_DISPATCH(_, &MesosContainerizerProcess::update);
// Use the same flags.
_containerizer = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
containerizer.reset(_containerizer.get());
Future<SlaveReregisteredMessage> reregistered =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), master.get()->pid, _);
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
AWAIT_READY(reregistered);
// Wait until the containerizer is updated.
AWAIT_READY(update);
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers.get().size());
//......... part of the code omitted here .........
Example 10: driver
// This test verifies that the framework can launch a command task
// that specifies both container image and persistent volumes.
TEST_F(LinuxFilesystemIsolatorMesosTest,
ROOT_ChangeRootFilesystemCommandExecutorPersistentVolume)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.resources = "cpus:2;mem:1024;disk(role1):1024";
flags.isolation = "filesystem/linux,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_roles(0, "role1");
MesosSchedulerDriver driver(
&sched,
frameworkInfo,
master.get()->pid,
DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
string dir1 = path::join(sandbox.get(), "dir1");
ASSERT_SOME(os::mkdir(dir1));
Resource persistentVolume = createPersistentVolume(
Megabytes(64),
"role1",
"id1",
"path1",
None(),
None(),
frameworkInfo.principal());
// We use the filter explicitly here so that the resources will not
// be filtered for 5 seconds (the default).
Filters filters;
filters.set_refuse_seconds(0);
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:512").get() + persistentVolume,
"echo abc > path1/file");
task.mutable_container()->CopyFrom(createContainerInfo(
"test_image",
{createVolumeHostPath("/tmp", dir1, Volume::RW)}));
// Create the persistent volumes and launch task via `acceptOffers`.
driver.acceptOffers(
{offer.id()},
{CREATE(persistentVolume), LAUNCH({task})},
filters);
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
AWAIT_READY(statusStarting);
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
//......... part of the code omitted here .........
Example 11: fetcher
//......... part of the code omitted here .........
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
string dir1 = path::join(sandbox.get(), "dir1");
ASSERT_SOME(os::mkdir(dir1));
Resource persistentVolume = createPersistentVolume(
Megabytes(64),
"role1",
"id1",
"path1",
None(),
None(),
frameworkInfo.principal());
// Create a task that does nothing for a long time.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:512").get() + persistentVolume,
"sleep 1000");
task.mutable_container()->CopyFrom(createContainerInfo(
"test_image",
{createVolumeHostPath("/tmp", dir1, Volume::RW)}));
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillRepeatedly(DoDefault());
Future<Nothing> ack =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
// Create the persistent volumes and launch task via `acceptOffers`.
driver.acceptOffers(
{offer.id()},
{CREATE(persistentVolume), LAUNCH({task})});
AWAIT_READY(statusStarting);
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
// Wait for the ACK to be checkpointed.
AWAIT_READY(ack);
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *containers->begin();
// Restart the slave.
slave.get()->terminate();
// Wipe the slave meta directory so that the slave will treat the
// above running task as an orphan.
ASSERT_SOME(os::rmdir(slave::paths::getMetaRootDir(flags.work_dir)));
Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover);
// Recreate the containerizer using the same helper as above.
containerizer.reset();
create = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(create);
containerizer.reset(create.get());
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
// Wait until slave recovery is complete.
AWAIT_READY(_recover);
// Wait until the orphan containers are cleaned up.
AWAIT_READY(containerizer->wait(containerId));
Try<fs::MountInfoTable> table = fs::MountInfoTable::read();
ASSERT_SOME(table);
// All mount targets should be under this directory.
string directory = slave::paths::getSandboxRootDir(flags.work_dir);
// Verify that the orphaned container's persistent volume and
// the rootfs are unmounted.
foreach (const fs::MountInfoTable::Entry& entry, table->entries) {
EXPECT_FALSE(strings::contains(entry.target, directory))
<< "Target was not unmounted: " << entry.target;
}
driver.stop();
driver.join();
}
Example 12: statusUpdate
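//......... part of the code omitted here .........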
EXPECT_TRUE(result.contains(expected));
}
driver1.stop();
driver1.join();
Future<TaskStatus> status2;
EXPECT_CALL(sched2, statusUpdate(&driver2, _))
.WillOnce(FutureArg<1>(&status2));
// Trigger explicit reconciliation.
driver2.reconcileTasks({status.get()});
AWAIT_READY(status2);
EXPECT_EQ(TASK_RUNNING, status2->state());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver2.stop();
driver2.join();
}
// This test checks that an agent's resources are offered to non-hierarchical
// roles both before and after the agent is upgraded to support
// HIERARCHICAL_ROLE. We first strip the HIERARCHICAL_ROLE capability from
// `registerSlaveMessage` to spoof an old agent, then simulate a master
// detection event to trigger agent re-registration. Both before and after
// the 'upgrade', the agent's resources should be offered to non-hierarchical
// roles. A hedged sketch of this flow follows below.
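The listing omits that test's body. As a rough illustration only, here is a hedged sketch of the capability-stripping flow described above, modeled on the message-interception pattern from Example 6; the test and fixture names, the clear_agent_capabilities() call, and the placeholder assertions are assumptions, not the actual Mesos test code.
// Hypothetical sketch -- not the omitted upstream test.
TEST_F(MasterTest, HypotheticalHierarchicalRoleUpgrade)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  // Intercept the agent's registration so the HIERARCHICAL_ROLE
  // capability can be stripped before the master sees it.
  Future<RegisterSlaveMessage> registerSlaveMessage =
    DROP_PROTOBUF(RegisterSlaveMessage(), _, _);
  StandaloneMasterDetector detector(master.get()->pid);
  Try<Owned<cluster::Slave>> slave = StartSlave(&detector, CreateSlaveFlags());
  ASSERT_SOME(slave);
  AWAIT_READY(registerSlaveMessage);
  // Spoof an old agent: clear the capabilities and re-post the message.
  RegisterSlaveMessage spoofed = registerSlaveMessage.get();
  spoofed.clear_agent_capabilities(); // Field name is an assumption.
  process::post(slave.get()->pid, master.get()->pid, spoofed);
  // ... offers to non-hierarchical roles would be verified here ...
  // Simulate a master detection event so the agent re-registers with its
  // real (HIERARCHICAL_ROLE-capable) capabilities, then verify again.
  detector.appoint(master.get()->pid);
}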
Example 13: execSuperhero
//......... part of the code omitted here .........
Future<vector<Offer>> offersSuperhero;
EXPECT_CALL(schedSuperhero, resourceOffers(&driverSuperhero, _))
.WillOnce(FutureArg<1>(&offersSuperhero))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driverSuperhero.start();
AWAIT_READY(frameworkIdSuperhero);
AWAIT_READY(offersSuperhero);
ASSERT_FALSE(offersSuperhero->empty());
// Define a task which will run on executorSuperhero of frameworkSuperhero.
TaskInfo taskSuperhero;
taskSuperhero.set_name("test-" + roleSuperhero);
taskSuperhero.mutable_task_id()->set_value("1");
taskSuperhero.mutable_slave_id()->MergeFrom(
offersSuperhero.get()[0].slave_id());
taskSuperhero.mutable_resources()->MergeFrom(
offersSuperhero.get()[0].resources());
taskSuperhero.mutable_executor()->MergeFrom(executorSuperhero);
EXPECT_CALL(execSuperhero, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING))
.WillRepeatedly(Return());
Future<TaskStatus> statusSuperhero;
EXPECT_CALL(schedSuperhero, statusUpdate(&driverSuperhero, _))
.WillOnce(FutureArg<1>(&statusSuperhero));
driverSuperhero.launchTasks(offersSuperhero.get()[0].id(), {taskSuperhero});
AWAIT_READY(statusSuperhero);
EXPECT_EQ(TASK_RUNNING, statusSuperhero->state());
MockScheduler schedMuggle;
MesosSchedulerDriver driverMuggle(
&schedMuggle,
frameworkMuggle,
master.get()->pid,
DEFAULT_CREDENTIAL_2);
EXPECT_CALL(execMuggle, registered(_, _, _, _))
.Times(AtMost(1));
Future<FrameworkID> frameworkIdMuggle;
EXPECT_CALL(schedMuggle, registered(&driverMuggle, _, _))
.WillOnce(FutureArg<1>(&frameworkIdMuggle));
Future<vector<Offer>> offersMuggle;
EXPECT_CALL(schedMuggle, resourceOffers(&driverMuggle, _))
.WillOnce(FutureArg<1>(&offersMuggle))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driverMuggle.start();
AWAIT_READY(frameworkIdMuggle);
AWAIT_READY(offersMuggle);
ASSERT_FALSE(offersMuggle->empty());
// Define a task which will run on executorMuggle of frameworkMuggle.
TaskInfo taskMuggle;
taskMuggle.set_name("test-" + roleMuggle);
taskMuggle.mutable_task_id()->set_value("2");
taskMuggle.mutable_slave_id()->MergeFrom(
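//......... part of the code omitted here .........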
Example 14: driver
// This test verifies that a task group is launched on the agent if the executor
// provides a valid authentication token specifying its own ContainerID.
TEST_F(ExecutorAuthorizationTest, RunTaskGroup)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
// Start an agent with permissive ACLs so that a task can be launched.
ACLs acls;
acls.set_permissive(true);
slave::Flags flags = CreateSlaveFlags();
flags.acls = acls;
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:0.5;mem:32").get(),
"sleep 1000");
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
Resources executorResources =
allocatedResources(Resources::parse("cpus:0.1;mem:32;disk:32").get(), "*");
ExecutorInfo executor;
executor.mutable_executor_id()->set_value("default");
executor.set_type(ExecutorInfo::DEFAULT);
executor.mutable_framework_id()->CopyFrom(frameworkId.get());
executor.mutable_resources()->CopyFrom(executorResources);
TaskGroupInfo taskGroup;
taskGroup.add_tasks()->CopyFrom(task);
driver.acceptOffers({offer.id()}, {LAUNCH_GROUP(executor, taskGroup)});
AWAIT_READY(status);
ASSERT_EQ(task.task_id(), status->task_id());
EXPECT_EQ(TASK_STARTING, status->state());
driver.stop();
driver.join();
}
Example 15: fetcher
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about the memory cgroup for this test.
flags.isolation = "cgroups/mem";
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> starting;
Future<TaskStatus> running;
Future<TaskStatus> killed;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&starting))
.WillOnce(FutureArg<1>(&running))
.WillOnce(FutureArg<1>(&killed))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offer.id(), {task});
AWAIT_READY(starting);
EXPECT_EQ(task.task_id(), starting->task_id());
EXPECT_EQ(TASK_STARTING, starting->state());
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running->task_id());
EXPECT_EQ(TASK_RUNNING, running->state());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *(containers->begin());
// Wait a while for some memory pressure events to occur.
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = containerizer->usage(containerId);
AWAIT_READY(usage);
if (usage->mem_low_pressure_counter() > 0) {
// We will check the correctness of the memory pressure counters
// later, because the memory-hammering task is still active
// and potentially incrementing these counters.
break;
}
os::sleep(Milliseconds(100));
waited += Milliseconds(100);
} while (waited < Seconds(5));
EXPECT_LE(waited, Seconds(5));
// Pause the clock to ensure that the reaper doesn't reap the exited
// command executor and inform the containerizer/slave.
Clock::pause();
Clock::settle();
// Stop the memory-hammering task.
//......... part of the code omitted here .........