This page collects typical usage examples of the C++ Future::empty method. If you have been wondering how Future::empty is used in practice, what it does, or what calling code looks like, the curated method examples here may help. You can also explore further usage examples of the containing class Future.
Below, 14 code examples of the Future::empty method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
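A note before the examples: the idiom they all share is AWAIT_READY(offers) followed by ASSERT_FALSE(offers->empty()). A ready process::Future<T> forwards operator-> to the value it contains, so the empty() being called is std::vector<Offer>::empty(), not a method of Future itself. Below is a minimal standalone sketch of that dereference idiom, assuming only the libprocess headers; it is illustrative, not taken from the examples.

#include <vector>

#include <process/future.hpp>

using process::Future;
using process::Promise;

int main()
{
  Promise<std::vector<int>> promise;
  Future<std::vector<int>> future = promise.future();

  promise.set(std::vector<int>({1, 2, 3}));

  // Once the future is ready, operator-> yields a pointer to the
  // contained value, so future->empty() is std::vector::empty().
  return (future.isReady() && !future->empty()) ? 0 : 1;
}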

Example 1: driver

// This test confirms that rlimits are set for nested containers.
TEST_F(PosixRLimitsIsolatorTest, NestedContainers)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";
#ifndef USE_SSL_SOCKET
  // Disable operator API authentication for the default executor.
  // Executor authentication currently has SSL as a dependency, so we
  // cannot require executors to authenticate with the agent operator
  // API if Mesos was not built with SSL support.
  flags.authenticate_http_readwrite = false;
#endif // USE_SSL_SOCKET
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);
  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Future<TaskStatus> taskStatuses[4];
  {
    // This variable doesn't have to be used explicitly.
    testing::InSequence inSequence;
    foreach (Future<TaskStatus>& taskStatus, taskStatuses) {
      EXPECT_CALL(sched, statusUpdate(&driver, _))
        .WillOnce(FutureArg<1>(&taskStatus));
    }
    EXPECT_CALL(sched, statusUpdate(&driver, _))
      .WillRepeatedly(Return()); // Ignore subsequent updates.
  }
//......... (part of the code omitted here) .........
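For context: the posix/rlimits isolator applies POSIX resource limits to container processes via setrlimit(2). A minimal standalone sketch of that underlying system call (plain POSIX, not Mesos code):

#include <sys/resource.h>

int main()
{
  // Cap the number of open file descriptors for this process (and
  // any children it forks) at 64, for both the soft and hard limit.
  struct rlimit limit;
  limit.rlim_cur = 64;
  limit.rlim_max = 64;
  return ::setrlimit(RLIMIT_NOFILE, &limit);
}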

Example 2: StartMaster

TEST_F(ResourceOffersTest, ResourcesGetReofferedWhenUnused)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get());
  ASSERT_SOME(slave);
  MockScheduler sched1;
  MesosSchedulerDriver driver1(
      &sched1, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched1, registered(&driver1, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched1, resourceOffers(&driver1, _))
    .WillOnce(FutureArg<1>(&offers));
  driver1.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  vector<TaskInfo> tasks; // Use nothing!
  driver1.launchTasks(offers.get()[0].id(), tasks);
  MockScheduler sched2;
  MesosSchedulerDriver driver2(
      &sched2, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched2, registered(&driver2, _, _));
  EXPECT_CALL(sched2, resourceOffers(&driver2, _))
    .WillOnce(FutureArg<1>(&offers));
  driver2.start();
  AWAIT_READY(offers);
  // Stop the first framework before the second so no more offers are sent.
  driver1.stop();
  driver1.join();
  driver2.stop();
  driver2.join();
}
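Launching an empty task vector, as driver1 does above, simply leaves the offer unused. An explicit decline achieves the same recycling more directly; a sketch reusing driver1 and offers from this example (the zero filter is an assumption, chosen so the resources become re-offerable immediately):

Filters filters;
filters.set_refuse_seconds(0); // Do not filter the declined resources.
driver1.declineOffer(offers.get()[0].id(), filters);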

Example 3: driver

TEST_F_TEMP_DISABLED_ON_WINDOWS(
    ResourceOffersTest,
    ResourceOfferWithMultipleSlaves)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  Owned<MasterDetector> detector = master.get()->createDetector();
  vector<Owned<cluster::Slave>> slaves;
  // Start 10 slaves.
  for (int i = 0; i < 10; i++) {
    slave::Flags flags = CreateSlaveFlags();
    flags.launcher = "posix";
    flags.resources = Option<std::string>("cpus:2;mem:1024");
    Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
    ASSERT_SOME(slave);
    slaves.push_back(slave.get());
  }
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(&driver, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // All 10 slaves might not be in the first offer.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  EXPECT_GE(10u, offers->size());
  Resources resources(offers.get()[0].resources());
  EXPECT_EQ(2, resources.get<Value::Scalar>("cpus")->value());
  EXPECT_EQ(1024, resources.get<Value::Scalar>("mem")->value());
  driver.stop();
  driver.join();
}
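Each offer in the batch comes from a single agent, so a scheduler wanting the aggregate view has to sum. A sketch of totaling the resources across all received offers, reusing offers from this example:

Resources total;
foreach (const Offer& offer, offers.get()) {
  total += Resources(offer.resources());
}
// If all 10 agents landed in one batch, total would hold cpus:20;mem:10240.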

Example 4: fetcher

// This test verifies that the container will not be killed if the
// disk quota enforcement flag is false (even if the disk usage
// exceeds its quota).
TEST_F(DiskQuotaTest, NoQuotaEnforcement)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem,disk/du";
  // NOTE: We can't pause the clock because we need the reaper to reap
  // the 'du' subprocess.
  flags.container_disk_watch_interval = Milliseconds(1);
  flags.enforce_container_disk_quota = false;
  Fetcher fetcher(flags);
  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(_, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  const Offer& offer = offers.get()[0];
  // Create a task that uses 2MB of disk.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000");
  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status))
    .WillRepeatedly(Return()); // Ignore subsequent updates.
  driver.launchTasks(offer.id(), {task});
  AWAIT_READY(status);
  EXPECT_EQ(task.task_id(), status->task_id());
  EXPECT_EQ(TASK_RUNNING, status->state());
  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());
  const ContainerID& containerId = *(containers->begin());
  // Wait until disk usage can be retrieved and the usage actually
  // exceeds the limit. If the container is killed due to quota
  // enforcement (which shouldn't happen), the 'usage' call will
  // return a failed future, leading to a failed test.
  Duration elapsed = Duration::zero();
  while (true) {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);
    ASSERT_TRUE(usage->has_disk_limit_bytes());
    EXPECT_EQ(Megabytes(1), Bytes(usage->disk_limit_bytes()));
    if (usage->has_disk_used_bytes() &&
        usage->disk_used_bytes() > usage->disk_limit_bytes()) {
      break;
    }
    ASSERT_LT(elapsed, Seconds(5));
    os::sleep(Milliseconds(1));
    elapsed += Milliseconds(1);
  }
  driver.stop();
  driver.join();
//......... (part of the code omitted here) .........
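The quota arithmetic above leans on stout's Bytes type: the task is offered disk:1 (one megabyte), while the dd command writes two mebibytes. A standalone sketch of the same comparison:

#include <stout/bytes.hpp>

int main()
{
  Bytes limit = Megabytes(1);
  Bytes used = Bytes(2 * 1048576); // What `dd bs=1048576 count=2` writes.

  // The polling loop above waits for exactly this condition.
  return used > limit ? 0 : 1;
}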

Example 5: driver

// This test verifies that the container will be killed if the disk
// usage exceeds its quota.
TEST_F(DiskQuotaTest, DiskUsageExceedsQuota)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/cpu,posix/mem,disk/du";
  // NOTE: We can't pause the clock because we need the reaper to reap
  // the 'du' subprocess.
  flags.container_disk_watch_interval = Milliseconds(1);
  flags.enforce_container_disk_quota = true;
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(&driver, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  const Offer& offer = offers.get()[0];
  // Create a task which requests 1MB of disk but actually uses more
  // than 2MB.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2 && sleep 1000");
  Future<TaskStatus> status1;
  Future<TaskStatus> status2;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status1))
    .WillOnce(FutureArg<1>(&status2));
  driver.launchTasks(offer.id(), {task});
  AWAIT_READY(status1);
  EXPECT_EQ(task.task_id(), status1->task_id());
  EXPECT_EQ(TASK_RUNNING, status1->state());
  AWAIT_READY(status2);
  EXPECT_EQ(task.task_id(), status2->task_id());
  EXPECT_EQ(TASK_FAILED, status2->state());
  driver.stop();
  driver.join();
}
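A note on the Resources::parse calls used throughout these examples: parsing can fail, so it returns a Try<Resources>, and the tests call .get() only because the literals are known to be valid. A sketch of the defensive pattern for input that is not hard-coded:

Try<Resources> parsed = Resources::parse("cpus:1;mem:128;disk:1");
if (parsed.isError()) {
  // Handle the malformed resource string instead of asserting.
} else {
  Resources resources = parsed.get();
}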

Example 6: driver

// This test verifies that the framework can launch a command task
// that specifies both container image and persistent volumes.
TEST_F(LinuxFilesystemIsolatorMesosTest,
       ROOT_ChangeRootFilesystemCommandExecutorPersistentVolume)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));
  slave::Flags flags = CreateSlaveFlags();
  flags.resources = "cpus:2;mem:1024;disk(role1):1024";
  flags.isolation = "filesystem/linux,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_roles(0, "role1");
  MesosSchedulerDriver driver(
      &sched,
      frameworkInfo,
      master.get()->pid,
      DEFAULT_CREDENTIAL);
  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Offer offer = offers.get()[0];
  string dir1 = path::join(sandbox.get(), "dir1");
  ASSERT_SOME(os::mkdir(dir1));
  Resource persistentVolume = createPersistentVolume(
      Megabytes(64),
      "role1",
      "id1",
      "path1",
      None(),
      None(),
      frameworkInfo.principal());
  // We use the filter explicitly here so that the resources will not
  // be filtered for 5 seconds (the default).
  Filters filters;
  filters.set_refuse_seconds(0);
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:512").get() + persistentVolume,
      "echo abc > path1/file");
  task.mutable_container()->CopyFrom(createContainerInfo(
      "test_image",
      {createVolumeHostPath("/tmp", dir1, Volume::RW)}));
  // Create the persistent volumes and launch the task via `acceptOffers`.
  driver.acceptOffers(
      {offer.id()},
      {CREATE(persistentVolume), LAUNCH({task})},
      filters);
  Future<TaskStatus> statusStarting;
  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusStarting))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));
  AWAIT_READY(statusStarting);
  EXPECT_EQ(TASK_STARTING, statusStarting->state());
  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());
  AWAIT_READY(statusFinished);
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());
//......... (part of the code omitted here) .........
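Roughly what the createPersistentVolume() helper above assembles: a disk Resource tagged with a persistence ID and a container-path volume. This is a simplified sketch; the real helper also wires up reservation and principal information:

Resource volume = Resources::parse("disk", "64", "role1").get();

Resource::DiskInfo disk;
disk.mutable_persistence()->set_id("id1");
disk.mutable_volume()->set_container_path("path1");
disk.mutable_volume()->set_mode(Volume::RW);

volume.mutable_disk()->CopyFrom(disk);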

Example 7: fetcher

// In this test, the framework is not checkpointed. This ensures that when we
// stop the slave, the executor is killed and we will need to recover the
// working directories without getting any checkpointed recovery state.
TEST_F(ROOT_XFS_QuotaTest, NoCheckpointRecovery)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  Fetcher fetcher(flags);
  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(
      detector.get(),
      containerizer.get(),
      flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(_, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Offer offer = offers.get()[0];
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=1; sleep 1000");
  Future<TaskStatus> startingStatus;
  Future<TaskStatus> runningStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&startingStatus))
    .WillOnce(FutureArg<1>(&runningStatus))
    .WillOnce(Return());
  driver.launchTasks(offer.id(), {task});
  AWAIT_READY(startingStatus);
  EXPECT_EQ(task.task_id(), startingStatus->task_id());
  EXPECT_EQ(TASK_STARTING, startingStatus->state());
  AWAIT_READY(runningStatus);
  EXPECT_EQ(task.task_id(), runningStatus->task_id());
  EXPECT_EQ(TASK_RUNNING, runningStatus->state());
  Future<ResourceUsage> usage1 =
    process::dispatch(slave.get()->pid, &Slave::usage);
  AWAIT_READY(usage1);
  // We should have 1 executor using resources.
  ASSERT_EQ(1, usage1->executors().size());
  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());
  ContainerID containerId = *containers->begin();
  // Restart the slave.
  slave.get()->terminate();
  Future<SlaveReregisteredMessage> slaveReregisteredMessage =
    FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
  _containerizer = MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  containerizer.reset(_containerizer.get());
  slave = StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);
  // Wait until slave recovery is complete.
  Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover);
  AWAIT_READY_FOR(_recover, Seconds(60));
//......... (part of the code omitted here) .........
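This test deliberately leaves checkpointing off, so terminating the agent kills the executor. For contrast, a framework opts in through a single FrameworkInfo field (sketch; compare Example 9, which does exactly this):

FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true); // Executors can survive an agent restart.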

Example 8: driver

// This is the same logic as DiskUsageExceedsQuota except we turn off disk
// quota enforcement, so exceeding the quota should be allowed.
TEST_F(ROOT_XFS_QuotaTest, DiskUsageExceedsQuotaNoEnforce)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  Owned<MasterDetector> detector = master.get()->createDetector();
  slave::Flags flags = CreateSlaveFlags();
  flags.enforce_container_disk_quota = false;
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(&driver, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  const Offer& offer = offers.get()[0];
  // Create a task which requests 1MB of disk but actually uses more
  // than 2MB.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128;disk:1").get(),
      "dd if=/dev/zero of=file bs=1048576 count=2");
  Future<TaskStatus> startingStatus;
  Future<TaskStatus> runningStatus;
  Future<TaskStatus> finishedStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&startingStatus))
    .WillOnce(FutureArg<1>(&runningStatus))
    .WillOnce(FutureArg<1>(&finishedStatus));
  driver.launchTasks(offer.id(), {task});
  AWAIT_READY(startingStatus);
  EXPECT_EQ(task.task_id(), startingStatus->task_id());
  EXPECT_EQ(TASK_STARTING, startingStatus->state());
  AWAIT_READY(runningStatus);
  EXPECT_EQ(task.task_id(), runningStatus->task_id());
  EXPECT_EQ(TASK_RUNNING, runningStatus->state());
  // We expect the task to succeed even though it exceeded
  // the disk quota.
  AWAIT_READY(finishedStatus);
  EXPECT_EQ(task.task_id(), finishedStatus->task_id());
  EXPECT_EQ(TASK_FINISHED, finishedStatus->state());
  driver.stop();
  driver.join();
}
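The only behavioral switch between this test and DiskUsageExceedsQuota (Example 5) is one agent flag. Side by side, as a sketch:

slave::Flags enforcing = CreateSlaveFlags();
enforcing.enforce_container_disk_quota = true;   // Overuse ends in TASK_FAILED.

slave::Flags permissive = CreateSlaveFlags();
permissive.enforce_container_disk_quota = false; // Overuse runs to TASK_FINISHED.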

Example 9: fetcher

// This test verifies that persistent volumes are unmounted properly
// after a checkpointed framework disappears and the slave restarts.
//
// TODO(jieyu): Even though the command task specifies a new
// filesystem root, the executor (command executor) itself does not
// change filesystem root (uses the host filesystem). We need to add a
// test to test the scenario that the executor itself changes rootfs.
TEST_F(LinuxFilesystemIsolatorMesosTest,
       ROOT_RecoverOrphanedPersistentVolume)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));
  slave::Flags flags = CreateSlaveFlags();
  flags.resources = "cpus:2;mem:1024;disk(role1):1024";
  flags.isolation = "filesystem/linux,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";
  Fetcher fetcher(flags);
  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(create);
  Owned<Containerizer> containerizer(create.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(
      detector.get(),
      containerizer.get(),
      flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_roles(0, "role1");
  frameworkInfo.set_checkpoint(true);
  MesosSchedulerDriver driver(
      &sched,
      frameworkInfo,
      master.get()->pid,
      DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(&driver, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Offer offer = offers.get()[0];
  string dir1 = path::join(sandbox.get(), "dir1");
  ASSERT_SOME(os::mkdir(dir1));
  Resource persistentVolume = createPersistentVolume(
      Megabytes(64),
      "role1",
      "id1",
      "path1",
      None(),
      None(),
      frameworkInfo.principal());
  // Create a task that does nothing for a long time.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:512").get() + persistentVolume,
      "sleep 1000");
  task.mutable_container()->CopyFrom(createContainerInfo(
      "test_image",
      {createVolumeHostPath("/tmp", dir1, Volume::RW)}));
  Future<TaskStatus> statusStarting;
  Future<TaskStatus> statusRunning;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusStarting))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillRepeatedly(DoDefault());
  Future<Nothing> ack =
    FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
  // Create the persistent volumes and launch the task via `acceptOffers`.
  driver.acceptOffers(
//......... (part of the code omitted here) .........

Example 10: fetcher

TEST_P(CpuIsolatorTest, ROOT_UserCpuUsage)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = GetParam();
  Fetcher fetcher(flags);
  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(
      detector.get(),
      containerizer.get());
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(&driver, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  // Max out a single core in userspace. This will run for at most one
  // second.
  TaskInfo task = createTask(
      offers.get()[0],
      "while true ; do true ; done & sleep 60");
  Future<TaskStatus> statusRunning;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning));
  driver.launchTasks(offers.get()[0].id(), {task});
  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());
  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());
  ContainerID containerId = *(containers->begin());
  // Wait up to 1 second for the child process to induce 1/8 of a
  // second of user cpu time.
  ResourceStatistics statistics;
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);
    statistics = usage.get();
    // If we meet our usage expectations, we're done!
    if (statistics.cpus_user_time_secs() >= 0.125) {
      break;
    }
    os::sleep(Milliseconds(200));
    waited += Milliseconds(200);
  } while (waited < Seconds(1));
  EXPECT_LE(0.125, statistics.cpus_user_time_secs());
  driver.stop();
  driver.join();
}

Example 11: driver

// This test verifies that a task group is launched on the agent if the
// executor provides a valid authentication token specifying its own
// ContainerID.
TEST_F(ExecutorAuthorizationTest, RunTaskGroup)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  // Start an agent with permissive ACLs so that a task can be launched.
  ACLs acls;
  acls.set_permissive(true);
  slave::Flags flags = CreateSlaveFlags();
  flags.acls = acls;
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);
  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Offer offer = offers.get()[0];
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:0.5;mem:32").get(),
      "sleep 1000");
  Future<TaskStatus> status;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&status));
  Resources executorResources = allocatedResources(
      Resources::parse("cpus:0.1;mem:32;disk:32").get(), "*");
  ExecutorInfo executor;
  executor.mutable_executor_id()->set_value("default");
  executor.set_type(ExecutorInfo::DEFAULT);
  executor.mutable_framework_id()->CopyFrom(frameworkId.get());
  executor.mutable_resources()->CopyFrom(executorResources);
  TaskGroupInfo taskGroup;
  taskGroup.add_tasks()->CopyFrom(task);
  driver.acceptOffers({offer.id()}, {LAUNCH_GROUP(executor, taskGroup)});
  AWAIT_READY(status);
  ASSERT_EQ(task.task_id(), status->task_id());
  EXPECT_EQ(TASK_STARTING, status->state());
  driver.stop();
  driver.join();
}
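A TaskGroupInfo can carry several tasks, which are launched atomically on the same executor. A sketch extending the example above with a hypothetical second task (the explicit task ID is an assumption, added because IDs must be unique within a framework):

TaskInfo task2 = createTask(
    offer.slave_id(),
    Resources::parse("cpus:0.5;mem:32").get(),
    "sleep 1000");
task2.mutable_task_id()->set_value("task2");

TaskGroupInfo group;
group.add_tasks()->CopyFrom(task);
group.add_tasks()->CopyFrom(task2);

driver.acceptOffers({offer.id()}, {LAUNCH_GROUP(executor, group)});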

Example 12: fetcher

// Tests that the logrotate container logger only closes FDs when it
// is supposed to and does not interfere with other FDs on the agent.
TEST_F(ContainerLoggerTest, LOGROTATE_ModuleFDOwnership)
{
  // Create a master, agent, and framework.
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  Future<SlaveRegisteredMessage> slaveRegisteredMessage =
    FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
  // We'll need access to these flags later.
  slave::Flags flags = CreateSlaveFlags();
  // Use the non-default container logger that rotates logs.
  flags.container_logger = LOGROTATE_CONTAINER_LOGGER_NAME;
  Fetcher fetcher(flags);
  // We use an actual containerizer + executor since we want something to run.
  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, false, &fetcher);
  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);
  AWAIT_READY(slaveRegisteredMessage);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));
  // Wait for an offer, and start a task.
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  // Start a task that will keep running until the end of the test.
  TaskInfo task = createTask(offers.get()[0], "sleep 100");
  Future<TaskStatus> statusStarting;
  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusKilled;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusStarting))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusKilled))
    .WillRepeatedly(Return()); // Ignore subsequent updates.
  driver.launchTasks(offers.get()[0].id(), {task});
  AWAIT_READY(statusStarting);
  EXPECT_EQ(TASK_STARTING, statusStarting->state());
  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());
  // Open multiple files, so that we're fairly certain we've opened
  // the same FDs (integers) opened by the container logger.
  vector<int> fds;
  for (int i = 0; i < 50; i++) {
    Try<int> fd = os::open(os::DEV_NULL, O_RDONLY);
    ASSERT_SOME(fd);
    fds.push_back(fd.get());
  }
  // Kill the task, which also kills the executor.
  driver.killTask(statusRunning->task_id());
  AWAIT_READY(statusKilled);
  EXPECT_EQ(TASK_KILLED, statusKilled->state());
  Future<Nothing> executorTerminated =
    FUTURE_DISPATCH(_, &Slave::executorTerminated);
  AWAIT_READY(executorTerminated);
  // Close all the FDs we opened. Every `close` should succeed.
  foreach (int fd, fds) {
    ASSERT_SOME(os::close(fd));
  }
//......... (part of the code omitted here) .........

Example 13: fetcher

TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);
  slave::Flags flags = CreateSlaveFlags();
  // We only care about the memory cgroup for this test.
  flags.isolation = "cgroups/mem";
  Fetcher fetcher(flags);
  Try<MesosContainerizer*> _containerizer =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(_containerizer);
  Owned<MesosContainerizer> containerizer(_containerizer.get());
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), containerizer.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
  EXPECT_CALL(sched, registered(_, _, _));
  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driver.start();
  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());
  Offer offer = offers.get()[0];
  // Run a task that triggers a memory pressure event. We request 1G
  // disk because we are going to write a 512 MB file repeatedly.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:256;disk:1024").get(),
      "while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
  Future<TaskStatus> starting;
  Future<TaskStatus> running;
  Future<TaskStatus> killed;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&starting))
    .WillOnce(FutureArg<1>(&running))
    .WillOnce(FutureArg<1>(&killed))
    .WillRepeatedly(Return()); // Ignore subsequent updates.
  driver.launchTasks(offer.id(), {task});
  AWAIT_READY(starting);
  EXPECT_EQ(task.task_id(), starting->task_id());
  EXPECT_EQ(TASK_STARTING, starting->state());
  AWAIT_READY(running);
  EXPECT_EQ(task.task_id(), running->task_id());
  EXPECT_EQ(TASK_RUNNING, running->state());
  Future<hashset<ContainerID>> containers = containerizer->containers();
  AWAIT_READY(containers);
  ASSERT_EQ(1u, containers->size());
  ContainerID containerId = *(containers->begin());
  // Wait a while for some memory pressure events to occur.
  Duration waited = Duration::zero();
  do {
    Future<ResourceStatistics> usage = containerizer->usage(containerId);
    AWAIT_READY(usage);
    if (usage->mem_low_pressure_counter() > 0) {
      // We will check the correctness of the memory pressure counters
      // later, because the memory-hammering task is still active
      // and potentially incrementing these counters.
      break;
    }
    os::sleep(Milliseconds(100));
    waited += Milliseconds(100);
  } while (waited < Seconds(5));
  EXPECT_LE(waited, Seconds(5));
  // Pause the clock to ensure that the reaper doesn't reap the exited
  // command executor and inform the containerizer/slave.
  Clock::pause();
  Clock::settle();
  // Stop the memory-hammering task.
//......... (part of the code omitted here) .........
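Once the memory-hammering task has been stopped (the omitted tail of this test), the counters can be checked for consistency: every medium-pressure event is also counted at the low threshold, and every critical event at the medium threshold. A sketch of that final check, reusing containerizer and containerId:

Future<ResourceStatistics> usage = containerizer->usage(containerId);
AWAIT_READY(usage);

EXPECT_LE(usage->mem_medium_pressure_counter(),
          usage->mem_low_pressure_counter());
EXPECT_LE(usage->mem_critical_pressure_counter(),
          usage->mem_medium_pressure_counter());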

Example 14: execSuperhero

//......... (part of the code omitted here) .........
  flags.resources = "cpus(" + roleSuperhero + "):2;" + "cpus(" + roleMuggle +
    "):3;mem(" + roleSuperhero + "):512;" + "mem(" + roleMuggle + "):1024;";
  Owned<MasterDetector> detector = master.get()->createDetector();
  Try<Owned<cluster::Slave>> slave = this->StartSlave(
      detector.get(), &containerizer, authorizer.get(), flags);
  ASSERT_SOME(slave);
  MockScheduler schedSuperhero;
  MesosSchedulerDriver driverSuperhero(
      &schedSuperhero,
      frameworkSuperhero,
      master.get()->pid,
      DEFAULT_CREDENTIAL);
  EXPECT_CALL(execSuperhero, registered(_, _, _, _))
    .Times(AtMost(1));
  Future<FrameworkID> frameworkIdSuperhero;
  EXPECT_CALL(schedSuperhero, registered(&driverSuperhero, _, _))
    .WillOnce(FutureArg<1>(&frameworkIdSuperhero));
  Future<vector<Offer>> offersSuperhero;
  EXPECT_CALL(schedSuperhero, resourceOffers(&driverSuperhero, _))
    .WillOnce(FutureArg<1>(&offersSuperhero))
    .WillRepeatedly(Return()); // Ignore subsequent offers.
  driverSuperhero.start();
  AWAIT_READY(frameworkIdSuperhero);
  AWAIT_READY(offersSuperhero);
  ASSERT_FALSE(offersSuperhero->empty());
  // Define a task which will run on executorSuperhero of frameworkSuperhero.
  TaskInfo taskSuperhero;
  taskSuperhero.set_name("test-" + roleSuperhero);
  taskSuperhero.mutable_task_id()->set_value("1");
  taskSuperhero.mutable_slave_id()->MergeFrom(
      offersSuperhero.get()[0].slave_id());
  taskSuperhero.mutable_resources()->MergeFrom(
      offersSuperhero.get()[0].resources());
  taskSuperhero.mutable_executor()->MergeFrom(executorSuperhero);
  EXPECT_CALL(execSuperhero, launchTask(_, _))
    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING))
    .WillRepeatedly(Return());
  Future<TaskStatus> statusSuperhero;
  EXPECT_CALL(schedSuperhero, statusUpdate(&driverSuperhero, _))
    .WillOnce(FutureArg<1>(&statusSuperhero));
  driverSuperhero.launchTasks(offersSuperhero.get()[0].id(), {taskSuperhero});
  AWAIT_READY(statusSuperhero);
  EXPECT_EQ(TASK_RUNNING, statusSuperhero->state());
  MockScheduler schedMuggle;
  MesosSchedulerDriver driverMuggle(
      &schedMuggle,
      frameworkMuggle,
      master.get()->pid,
      DEFAULT_CREDENTIAL_2);
  EXPECT_CALL(execMuggle, registered(_, _, _, _))