This article collects typical usage examples of the C++ method Future::task_id: what the method does, how to call it, and what real call sites look like. The curated examples below may help answer those questions, and you can also explore the containing class Future for more context.
The following 14 code examples of Future::task_id are shown, ordered by popularity by default.
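Before the full examples, here is a minimal sketch of the pattern nearly all of them share: a Future<TaskStatus> is captured from a mocked statusUpdate callback, awaited, and its task_id() is compared against the launched task. This is only a sketch; sched, driver, offer, and task stand in for the setup each test performs, and AWAIT_READY, FutureArg, and createTask come from the Mesos test harness.
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
  .WillOnce(FutureArg<1>(&status));
driver.launchTasks(offer.id(), {task});
AWAIT_READY(status);                           // Block until the update arrives.
EXPECT_EQ(task.task_id(), status->task_id());  // Future<T>::operator-> yields the T.
EXPECT_EQ(TASK_RUNNING, status->state());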
Example 1: foreach
foreach (const Future<TaskStatus>& taskStatus, taskStatuses) {
AWAIT_READY(taskStatus);
Option<TaskState> taskState = taskStates.get(taskStatus->task_id());
ASSERT_SOME(taskState);
switch (taskState.get()) {
case TASK_STAGING: {
ASSERT_EQ(TASK_STARTING, taskStatus->state())
<< taskStatus->DebugString();
taskStates[taskStatus->task_id()] = TASK_STARTING;
break;
}
case TASK_STARTING: {
ASSERT_EQ(TASK_RUNNING, taskStatus->state())
<< taskStatus->DebugString();
taskStates[taskStatus->task_id()] = TASK_RUNNING;
break;
}
default: {
FAIL() << "Unexpected task update: " << taskStatus->DebugString();
break;
}
}
}
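The loop above assumes taskStatuses and taskStates were populated while launching the tasks; neither is shown on this page. A hypothetical setup consistent with the snippet:
hashmap<TaskID, TaskState> taskStates;
foreach (const TaskInfo& task, tasks) {
  taskStates[task.task_id()] = TASK_STAGING;  // Every task starts out staging.
}
// taskStatuses would hold one Future<TaskStatus> per expected update (two per
// task: STARTING, then RUNNING), typically captured via FutureArg<1> from an
// EXPECT_CALL on statusUpdate.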
Example 2: fetcher
// This test verifies that the disk quota isolator recovers properly after
// the slave restarts.
TEST_F(DiskQuotaTest, SlaveRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "posix/cpu,posix/mem,disk/du";
flags.container_disk_watch_interval = Milliseconds(1);
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
const Offer& offer = offers.get()[0];
// Create a task that writes 2MB within its 3MB disk allocation.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:3").get(),
"dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000");
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offer.id(), {task});
AWAIT_READY(status);
EXPECT_EQ(task.task_id(), status->task_id());
EXPECT_EQ(TASK_RUNNING, status->state());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
const ContainerID& containerId = *(containers->begin());
// Stop the slave.
slave.get()->terminate();
Future<ReregisterExecutorMessage> reregisterExecutorMessage =
FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);
Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover);
_containerizer = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
containerizer.reset(_containerizer.get());
detector = master.get()->createDetector();
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
Clock::pause();
AWAIT_READY(_recover);
// Wait for the slave to schedule the reregistration timeout.
Clock::settle();
// Ensure the executor re-registers before completing recovery.
//......... (remaining code omitted) .........
Example 3: fetcher
// Test that memory pressure listening is restarted after recovery.
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about the memory cgroup for this test.
flags.isolation = "cgroups/mem";
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> starting;
Future<TaskStatus> running;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&starting))
.WillOnce(FutureArg<1>(&running))
.WillRepeatedly(Return()); // Ignore subsequent updates.
Future<Nothing> runningAck =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
Future<Nothing> startingAck =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(starting);
EXPECT_EQ(task.task_id(), starting->task_id());
EXPECT_EQ(TASK_STARTING, starting->state());
AWAIT_READY(startingAck);
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running->task_id());
EXPECT_EQ(TASK_RUNNING, running->state());
// Wait for the ACK to be checkpointed.
AWAIT_READY_FOR(runningAck, Seconds(120));
// We restart the slave to let it recover.
slave.get()->terminate();
// Set up so we can wait until the new slave updates the container's
// resources (this occurs after the executor has re-registered).
Future<Nothing> update =
FUTURE_DISPATCH(_, &MesosContainerizerProcess::update);
// Use the same flags.
_containerizer = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
containerizer.reset(_containerizer.get());
Future<SlaveReregisteredMessage> reregistered =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), master.get()->pid, _);
slave = StartSlave(detector.get(), containerizer.get(), flags);
//......... (remaining code omitted) .........
Example 4: driver
// This test checks the behavior when invalid limits are passed.
TEST_F(PosixRLimitsIsolatorTest, InvalidLimits)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "posix/rlimits";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_NE(0u, offers->size());
TaskInfo task = createTask(
offers.get()[0].slave_id(),
offers.get()[0].resources(),
"true");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
// Set an impossible limit: soft > hard.
RLimitInfo rlimitInfo;
RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
rlimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
rlimit->set_soft(100);
rlimit->set_hard(1);
container->mutable_rlimit_info()->CopyFrom(rlimitInfo);
Future<TaskStatus> taskStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&taskStatus));
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(taskStatus);
EXPECT_EQ(task.task_id(), taskStatus->task_id());
EXPECT_EQ(TASK_FAILED, taskStatus->state());
EXPECT_EQ(TaskStatus::REASON_EXECUTOR_TERMINATED, taskStatus->reason());
driver.stop();
driver.join();
}
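For contrast, a well-formed RLimitInfo keeps soft <= hard. A hypothetical valid counterpart to the limit above, under which the launch would be expected to succeed rather than fail:
RLimitInfo validInfo;
RLimitInfo::RLimit* valid = validInfo.add_rlimits();
valid->set_type(RLimitInfo::RLimit::RLMT_CPU);
valid->set_soft(1);    // The soft limit must not exceed...
valid->set_hard(100);  // ...the hard limit.
container->mutable_rlimit_info()->CopyFrom(validInfo);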
Example 5: fetcher
// Tests that the logrotate container logger only closes FDs when it
// is supposed to and does not interfere with other FDs on the agent.
TEST_F(ContainerLoggerTest, LOGROTATE_ModuleFDOwnership)
{
// Create a master, agent, and framework.
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
// We'll need access to these flags later.
slave::Flags flags = CreateSlaveFlags();
// Use the non-default container logger that rotates logs.
flags.container_logger = LOGROTATE_CONTAINER_LOGGER_NAME;
Fetcher fetcher(flags);
// We use an actual containerizer + executor since we want something to run.
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, false, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
// Wait for an offer, and start a task.
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
// Start a task that will keep running until the end of the test.
TaskInfo task = createTask(offers.get()[0], "sleep 100");
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusKilled;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusKilled))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(statusStarting);
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
// Open multiple files, so that we're fairly certain we've opened
// the same FDs (integers) opened by the container logger.
vector<int> fds;
for (int i = 0; i < 50; i++) {
Try<int> fd = os::open(os::DEV_NULL, O_RDONLY);
ASSERT_SOME(fd);
fds.push_back(fd.get());
}
// Kill the task, which also kills the executor.
driver.killTask(statusRunning->task_id());
AWAIT_READY(statusKilled);
EXPECT_EQ(TASK_KILLED, statusKilled->state());
Future<Nothing> executorTerminated =
FUTURE_DISPATCH(_, &Slave::executorTerminated);
AWAIT_READY(executorTerminated);
// Close all the FDs we opened. Every `close` should succeed.
foreach (int fd, fds) {
ASSERT_SOME(os::close(fd));
}
Example 6: driver
// This test launches a container which has an image and joins host
// network, and then verifies that the container can access Internet.
TEST_F(CniIsolatorTest, ROOT_INTERNET_CURL_LaunchContainerInHostNetwork)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "docker/runtime,filesystem/linux";
flags.image_providers = "docker";
flags.docker_store_dir = path::join(sandbox.get(), "store");
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_EQ(1u, offers->size());
const Offer& offer = offers.get()[0];
// NOTE: We use a non-shell command here because 'sh' might not be
// in the PATH. 'alpine' does not specify env PATH in the image.
CommandInfo command;
command.set_shell(false);
command.set_value("/bin/ping");
command.add_arguments("/bin/ping");
command.add_arguments("-c1");
command.add_arguments("google.com");
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128").get(),
command);
Image image;
image.set_type(Image::DOCKER);
image.mutable_docker()->set_name("alpine");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
container->mutable_mesos()->mutable_image()->CopyFrom(image);
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
driver.launchTasks(offer.id(), {task});
AWAIT_READY_FOR(statusRunning, Seconds(60));
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(task.task_id(), statusFinished->task_id());
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
driver.stop();
driver.join();
}
Example 7: StartMaster
TEST_F(ResourceOffersTest, ResourcesGetReofferedAfterTaskInfoError)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get());
ASSERT_SOME(slave);
MockScheduler sched1;
MesosSchedulerDriver driver1(
&sched1, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched1, registered(&driver1, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched1, resourceOffers(&driver1, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver1.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
Resource* cpus = task.add_resources();
cpus->set_name("cpus");
cpus->set_type(Value::SCALAR);
cpus->mutable_scalar()->set_value(-1);
Resource* mem = task.add_resources();
mem->set_name("mem");
mem->set_type(Value::SCALAR);
mem->mutable_scalar()->set_value(static_cast<double>(Gigabytes(1).bytes()));
vector<TaskInfo> tasks;
tasks.push_back(task);
Future<TaskStatus> status;
EXPECT_CALL(sched1, statusUpdate(&driver1, _))
.WillOnce(FutureArg<1>(&status));
driver1.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(status);
EXPECT_EQ(task.task_id(), status->task_id());
EXPECT_EQ(TASK_ERROR, status->state());
EXPECT_EQ(TaskStatus::REASON_TASK_INVALID, status->reason());
EXPECT_TRUE(status->has_message());
EXPECT_TRUE(strings::contains(status->message(), "Invalid scalar resource"))
<< status->message();
MockScheduler sched2;
MesosSchedulerDriver driver2(
&sched2, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched2, registered(&driver2, _, _));
EXPECT_CALL(sched2, resourceOffers(&driver2, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver2.start();
AWAIT_READY(offers);
driver1.stop();
driver1.join();
driver2.stop();
driver2.join();
}
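The TASK_ERROR above is caused by the hand-built cpus scalar with value -1. A hypothetical fix is to parse well-formed resources instead of constructing them field by field, since Resources::parse rejects malformed input up front:
// A sketch only; reuses `task` from the test above.
Try<Resources> parsed = Resources::parse("cpus:1;mem:1024");
ASSERT_SOME(parsed);
task.clear_resources();
foreach (const Resource& resource, parsed.get()) {
  task.add_resources()->CopyFrom(resource);  // Copy each validated resource.
}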
Example 8: fetcher
// Verify that we can get accurate resource statistics from the XFS
// disk isolator.
TEST_F(ROOT_XFS_QuotaTest, ResourceStatistics)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
Fetcher fetcher(flags);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Create a task that tries to write 4MB into its 3MB disk allocation but
// doesn't fail. We will verify that the allocated disk is filled.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:3").get(),
"dd if=/dev/zero of=file bs=1048576 count=4 || sleep 1000");
Future<TaskStatus> startingStatus;
Future<TaskStatus> runningStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
Future<hashset<ContainerID>> containers = containerizer.get()->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *(containers->begin());
Timeout timeout = Timeout::in(Seconds(5));
while (true) {
Future<ResourceStatistics> usage = containerizer.get()->usage(containerId);
AWAIT_READY(usage);
ASSERT_TRUE(usage->has_disk_limit_bytes());
EXPECT_EQ(Megabytes(3), Bytes(usage->disk_limit_bytes()));
if (usage->has_disk_used_bytes()) {
// Usage must always be <= the limit.
EXPECT_LE(usage->disk_used_bytes(), usage->disk_limit_bytes());
// Usage might not be equal to the limit, but it must hit
// and not exceed the limit.
if (usage->disk_used_bytes() >= usage->disk_limit_bytes()) {
EXPECT_EQ(
usage->disk_used_bytes(), usage->disk_limit_bytes());
EXPECT_EQ(Megabytes(3), Bytes(usage->disk_used_bytes()));
break;
}
}
ASSERT_FALSE(timeout.expired());
os::sleep(Milliseconds(100));
}
driver.stop();
driver.join();
}
Example 9: driver
// This is the same logic as DiskUsageExceedsQuota except we turn off disk quota
// enforcement, so exceeding the quota should be allowed.
TEST_F(ROOT_XFS_QuotaTest, DiskUsageExceedsQuotaNoEnforce)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
slave::Flags flags = CreateSlaveFlags();
flags.enforce_container_disk_quota = false;
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
const Offer& offer = offers.get()[0];
// Create a task which requests 1MB of disk but actually writes 2MB.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:1").get(),
"dd if=/dev/zero of=file bs=1048576 count=2");
Future<TaskStatus> startingStatus;
Future<TaskStatus> runningStatus;
Future<TaskStatus> finishedStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus))
.WillOnce(FutureArg<1>(&finishedStatus));
driver.launchTasks(offer.id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
// We expect the task to succeed even though it exceeded
// the disk quota.
AWAIT_READY(finishedStatus);
EXPECT_EQ(task.task_id(), finishedStatus->task_id());
EXPECT_EQ(TASK_FINISHED, finishedStatus->state());
driver.stop();
driver.join();
}
Example 10: driver
// Tests that the task fails when it attempts to write to a persistent volume
// mounted as read-only. Note that although we use a shared persistent volume,
// the behavior is the same for non-shared persistent volumes.
TEST_F(LinuxFilesystemIsolatorMesosTest,
ROOT_WriteAccessSharedPersistentVolumeReadOnlyMode)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.resources = "cpus:2;mem:128;disk(role1):128";
flags.isolation = "filesystem/linux,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_roles(0, "role1");
frameworkInfo.add_capabilities()->set_type(
FrameworkInfo::Capability::SHARED_RESOURCES);
MesosSchedulerDriver driver(
&sched,
frameworkInfo,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
// We create a shared volume that the task will attempt to write to.
Resource volume = createPersistentVolume(
Megabytes(4),
"role1",
"id1",
"volume_path",
None(),
None(),
frameworkInfo.principal(),
true); // Shared volume.
// The task uses the shared volume as read-only.
Resource roVolume = volume;
roVolume.mutable_disk()->mutable_volume()->set_mode(Volume::RO);
Resources taskResources =
Resources::parse("cpus:1;mem:64;disk(role1):1").get() + roVolume;
TaskInfo task = createTask(
offers.get()[0].slave_id(),
taskResources,
"echo hello > volume_path/file");
// The task fails to write to the volume since its resources specify
// the volume as read-only.
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFailed;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFailed));
driver.acceptOffers(
{offers.get()[0].id()},
{CREATE(volume),
LAUNCH({task})});
AWAIT_READY(statusStarting);
EXPECT_EQ(task.task_id(), statusStarting->task_id());
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFailed);
EXPECT_EQ(task.task_id(), statusFailed->task_id());
EXPECT_EQ(TASK_FAILED, statusFailed->state());
//......... (remaining code omitted) .........
Example 11: driver
// This test verifies that a task group is launched on the agent if the executor
// provides a valid authentication token specifying its own ContainerID.
TEST_F(ExecutorAuthorizationTest, RunTaskGroup)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
// Start an agent with permissive ACLs so that a task can be launched.
ACLs acls;
acls.set_permissive(true);
slave::Flags flags = CreateSlaveFlags();
flags.acls = acls;
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:0.5;mem:32").get(),
"sleep 1000");
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
Resources executorResources =
allocatedResources(Resources::parse("cpus:0.1;mem:32;disk:32").get(), "*");
ExecutorInfo executor;
executor.mutable_executor_id()->set_value("default");
executor.set_type(ExecutorInfo::DEFAULT);
executor.mutable_framework_id()->CopyFrom(frameworkId.get());
executor.mutable_resources()->CopyFrom(executorResources);
TaskGroupInfo taskGroup;
taskGroup.add_tasks()->CopyFrom(task);
driver.acceptOffers({offer.id()}, {LAUNCH_GROUP(executor, taskGroup)});
AWAIT_READY(status);
ASSERT_EQ(task.task_id(), status->task_id());
EXPECT_EQ(TASK_STARTING, status->state());
driver.stop();
driver.join();
}
Example 12: driver
// This test verifies that a docker image's default entrypoint is executed
// correctly using the registry puller. This corresponds to the following case
// in the runtime isolator logic table: sh=0, value=0, argv=1, entrypoint=1,
// cmd=0.
TEST_F(DockerRuntimeIsolatorTest,
ROOT_CURL_INTERNET_DockerDefaultEntryptRegistryPuller)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "docker/runtime,filesystem/linux";
flags.image_providers = "docker";
flags.docker_store_dir = path::join(os::getcwd(), "store");
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_EQ(1u, offers->size());
const Offer& offer = offers.get()[0];
TaskInfo task;
task.set_name("test-task");
task.mutable_task_id()->set_value(UUID::random().toString());
task.mutable_slave_id()->CopyFrom(offer.slave_id());
task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
task.mutable_command()->set_shell(false);
task.mutable_command()->add_arguments("hello world");
Image image;
image.set_type(Image::DOCKER);
// The 'mesosphere/inky' image is used in the docker containerizer tests;
// its entrypoint is 'echo' and its cmd is null.
image.mutable_docker()->set_name("mesosphere/inky");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
container->mutable_mesos()->mutable_image()->CopyFrom(image);
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
driver.launchTasks(offer.id(), {task});
AWAIT_READY_FOR(statusRunning, Seconds(60));
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(task.task_id(), statusFinished->task_id());
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
driver.stop();
driver.join();
}
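For the logic-table case named in the comment (sh=0, value=0, argv=1, entrypoint=1, cmd=0), the expected resolution is that the image's entrypoint is prepended to the task's arguments. A conceptual trace of this test, not actual isolator code:
// entrypoint: ["echo"]          <- from the 'mesosphere/inky' image
// cmd:        null              <- the image defines no cmd
// argv:       ["hello world"]   <- from task.mutable_command()->add_arguments()
//
// Effective invocation inside the container:
//   echo "hello world"
// 'echo' exits 0, so the task transitions TASK_RUNNING -> TASK_FINISHED,
// matching the two status updates the test expects.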
Example 13: driver
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about the memory cgroup for this test.
flags.isolation = "cgroups/mem";
flags.agent_subsystems = None();
Fetcher fetcher;
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> running;
Future<TaskStatus> killed;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&running))
.WillOnce(FutureArg<1>(&killed))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offer.id(), {task});
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running.get().task_id());
EXPECT_EQ(TASK_RUNNING, running.get().state());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers.get().size());
ContainerID containerId = *(containers.get().begin());
// Wait a while for some memory pressure events to occur.
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = containerizer->usage(containerId);
AWAIT_READY(usage);
if (usage.get().mem_low_pressure_counter() > 0) {
// We will check the correctness of the memory pressure counters
// later, because the memory-hammering task is still active
// and potentially incrementing these counters.
break;
}
os::sleep(Milliseconds(100));
waited += Milliseconds(100);
} while (waited < Seconds(5));
EXPECT_LE(waited, Seconds(5));
// Pause the clock to ensure that the reaper doesn't reap the exited
// command executor and inform the containerizer/slave.
Clock::pause();
Clock::settle();
// Stop the memory-hammering task.
driver.killTask(task.task_id());
AWAIT_READY_FOR(killed, Seconds(120));
EXPECT_EQ(task.task_id(), killed->task_id());
EXPECT_EQ(TASK_KILLED, killed->state());
//......... (remaining code omitted) .........
Example 14: driver
// This test confirms that a task is forcibly terminated if it exceeds
// its configured resource limits.
TEST_F(PosixRLimitsIsolatorTest, TaskExceedingLimit)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "posix/rlimits";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
// The task attempts to use an infinite amount of CPU time.
TaskInfo task = createTask(
offers.get()[0].slave_id(),
offers.get()[0].resources(),
"while true; do true; done");
ContainerInfo* container = task.mutable_container();
container->set_type(ContainerInfo::MESOS);
// Limit the process to at most 1 second of CPU time.
RLimitInfo rlimitInfo;
RLimitInfo::RLimit* cpuLimit = rlimitInfo.add_rlimits();
cpuLimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
cpuLimit->set_soft(1);
cpuLimit->set_hard(1);
container->mutable_rlimit_info()->CopyFrom(rlimitInfo);
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFailed;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFailed));
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(statusRunning);
EXPECT_EQ(task.task_id(), statusRunning->task_id());
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFailed);
EXPECT_EQ(task.task_id(), statusFailed->task_id());
EXPECT_EQ(TASK_FAILED, statusFailed->state());
driver.stop();
driver.join();
}
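A note on why the busy loop surfaces as TASK_FAILED rather than being throttled: the kernel enforces RLIMIT_CPU with signals, SIGXCPU at the soft limit and a kill at the hard limit. A sketch of the POSIX call the posix/rlimits isolator presumably issues in the container before exec (an assumption; the isolator's internals are not shown here):
#include <sys/resource.h>

struct rlimit limit;
limit.rlim_cur = 1;  // Soft limit: SIGXCPU after 1 second of CPU time.
limit.rlim_max = 1;  // Hard limit: the process is killed once exceeded.
setrlimit(RLIMIT_CPU, &limit);  // Returns 0 on success.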