This page collects typical usage examples of the C++ method Offer::slave_id. If you are wondering what Offer::slave_id does, how to call it, or want concrete examples of it in use, the curated code samples below should help; you can also explore the enclosing Offer class for more context.
The 15 code examples below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
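Before the examples, a quick orientation: every snippet below follows the same pattern of copying the offer's agent ID (slave_id) and resources into a TaskInfo, which pins the task to the agent whose resources were offered. The following is a minimal sketch of that pattern, illustrative only; it assumes the standard Mesos protobuf header and is not taken from the examples below.

#include <mesos/mesos.hpp> // Generated protobuf types: Offer, TaskInfo, ...

using mesos::Offer;
using mesos::TaskInfo;

// Build a TaskInfo bound to the agent that made the offer. slave_id()
// identifies that agent; a task may only consume the offered resources
// on that same agent.
TaskInfo makeTask(const Offer& offer)
{
  TaskInfo task;
  task.set_name("example");
  task.mutable_task_id()->set_value("task-1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());
  task.mutable_command()->set_value("echo hello");
  return task;
}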
Example 1: createTask
inline TaskInfo createTask(
const Offer& offer,
const std::string& command,
const Option<mesos::ExecutorID>& executorId = None(),
const std::string& name = "test-task",
const std::string& id = UUID::random().toString())
{
return createTask(
offer.slave_id(), offer.resources(), command, executorId, name, id);
}
Example 2: createTasks
// TODO(benh): Move this into utils, make more generic, and use in
// other tests.
vector<TaskInfo> createTasks(const Offer& offer)
{
TaskInfo task;
task.set_name("test-task");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offer.slave_id());
task.mutable_resources()->MergeFrom(offer.resources());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
vector<TaskInfo> tasks;
tasks.push_back(task);
return tasks;
}
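Following up on the TODO above, here is a hedged sketch of what a more generic helper could look like. It is hypothetical (not the upstream utility), and note the caveat in the comments: every task claims the full offer, so launching more than one of them from a single offer would over-subscribe it.

// Hypothetical generalization; parameterizes the name prefix and count.
// stringify() is stout's number-to-string helper.
inline vector<TaskInfo> createTasks(
    const Offer& offer,
    size_t count,
    const std::string& namePrefix = "test-task")
{
  vector<TaskInfo> tasks;
  for (size_t i = 0; i < count; i++) {
    TaskInfo task;
    task.set_name(namePrefix);
    task.mutable_task_id()->set_value(stringify(i + 1));
    task.mutable_slave_id()->MergeFrom(offer.slave_id());
    // NOTE: Each task claims the entire offer; a real generic helper
    // would split offer.resources() across the tasks.
    task.mutable_resources()->MergeFrom(offer.resources());
    task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
    tasks.push_back(task);
  }
  return tasks;
}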
Example 3: createTask
inline TaskInfo createTask(
const Offer& offer,
const std::string& command,
const std::string& name = "test-task",
const std::string& id = UUID::random().toString())
{
TaskInfo task;
task.set_name(name);
task.mutable_task_id()->set_value(id);
task.mutable_slave_id()->MergeFrom(offer.slave_id());
task.mutable_resources()->MergeFrom(offer.resources());
task.mutable_command()->set_value(command);
return task;
}
Example 4: createTask
inline TaskInfo createTask(
const Offer& offer,
const std::string& command,
const Option<mesos::ExecutorID>& executorId = None(),
const std::string& name = "test-task",
const std::string& id = UUID::random().toString())
{
TaskInfo task;
task.set_name(name);
task.mutable_task_id()->set_value(id);
task.mutable_slave_id()->CopyFrom(offer.slave_id());
task.mutable_resources()->CopyFrom(offer.resources());
if (executorId.isSome()) {
ExecutorInfo executor;
executor.mutable_executor_id()->CopyFrom(executorId.get());
executor.mutable_command()->set_value(command);
task.mutable_executor()->CopyFrom(executor);
} else {
task.mutable_command()->set_value(command);
}
return task;
}
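Examples 3 and 4 differ in how fields are transferred from the offer. These are standard protobuf message operations: CopyFrom clears the destination and then copies, while MergeFrom overwrites set scalar fields but appends repeated fields to whatever is already there. On a freshly constructed TaskInfo the two behave identically; CopyFrom is the safer default when the destination may already be populated. A small illustration (standard protobuf semantics, not code from the examples):

SlaveID dest;
dest.set_value("stale-agent-id");

// CopyFrom: dest ends up holding exactly offer.slave_id().
dest.CopyFrom(offer.slave_id());

// MergeFrom: set scalar fields are overwritten, but repeated fields
// (none on SlaveID) would be appended, which can retain stale entries.
dest.MergeFrom(offer.slave_id());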
Example 5: populateTasks
vector<TaskInfo> populateTasks(
const string& cmd,
CommandInfo healthCommand,
const Offer& offer,
int gracePeriodSeconds = 0,
const Option<int>& consecutiveFailures = None(),
const Option<map<string, string> >& env = None())
{
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->CopyFrom(offer.slave_id());
task.mutable_resources()->CopyFrom(offer.resources());
CommandInfo command;
command.set_value(cmd);
Environment::Variable* variable =
command.mutable_environment()->add_variables();
// We need to set the correct directory for launching the health check
// process, instead of the test default.
variable->set_name("MESOS_LAUNCHER_DIR");
variable->set_value(path::join(tests::flags.build_dir, "src"));
task.mutable_command()->CopyFrom(command);
HealthCheck healthCheck;
  if (env.isSome()) {
    foreachpair (const string& name, const string& value, env.get()) {
      Environment::Variable* variable =
        healthCommand.mutable_environment()->mutable_variables()->Add();
      variable->set_name(name);
      variable->set_value(value);
    }
  }

  // Tail reconstructed to make the helper complete (it mirrors the
  // upstream Mesos test helper this example is drawn from): attach the
  // health check to the task and return it as a single-element vector.
  healthCheck.mutable_command()->CopyFrom(healthCommand);
  healthCheck.set_delay_seconds(0);
  healthCheck.set_interval_seconds(0);
  healthCheck.set_grace_period_seconds(gracePeriodSeconds);

  if (consecutiveFailures.isSome()) {
    healthCheck.set_consecutive_failures(consecutiveFailures.get());
  }

  task.mutable_health_check()->CopyFrom(healthCheck);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  return tasks;
}
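A hedged usage sketch for the helper above (offer is assumed to come from the enclosing test; the commands are illustrative):

CommandInfo healthCommand;
healthCommand.set_value("exit 0"); // A health check that always passes.

vector<TaskInfo> tasks = populateTasks(
    "sleep 120",   // The task's own command.
    healthCommand, // Command run periodically as the health check.
    offer,         // Supplies slave_id() and resources().
    0,             // No grace period.
    3);            // Tolerate up to three consecutive failures.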
Example 6: driver
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about memory cgroup for this test.
flags.isolation = "cgroups/mem";
flags.agent_subsystems = None();
Fetcher fetcher;
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512 MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> running;
Future<TaskStatus> killed;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&running))
.WillOnce(FutureArg<1>(&killed))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offer.id(), {task});
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running.get().task_id());
EXPECT_EQ(TASK_RUNNING, running.get().state());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers.get().size());
ContainerID containerId = *(containers.get().begin());
// Wait a while for some memory pressure events to occur.
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = containerizer->usage(containerId);
AWAIT_READY(usage);
if (usage.get().mem_low_pressure_counter() > 0) {
// We will check the correctness of the memory pressure counters
// later, because the memory-hammering task is still active
// and potentially incrementing these counters.
break;
}
os::sleep(Milliseconds(100));
waited += Milliseconds(100);
} while (waited < Seconds(5));
EXPECT_LE(waited, Seconds(5));
// Pause the clock to ensure that the reaper doesn't reap the exited
// command executor and inform the containerizer/slave.
Clock::pause();
Clock::settle();
// Stop the memory-hammering task.
driver.killTask(task.task_id());
AWAIT_READY_FOR(killed, Seconds(120));
EXPECT_EQ(task.task_id(), killed->task_id());
EXPECT_EQ(TASK_KILLED, killed->state());
//.........remainder of this example omitted.........
Example 7: driver
// Test that memory pressure listening is restarted after recovery.
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
{
Try<PID<Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about memory cgroup for this test.
flags.isolation = "cgroups/mem";
flags.slave_subsystems = None();
Fetcher fetcher;
Try<MesosContainerizer*> containerizer1 =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(containerizer1);
Try<PID<Slave>> slave = StartSlave(containerizer1.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512 MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status))
.WillRepeatedly(Return()); // Ignore subsequent updates.
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(status);
EXPECT_EQ(task.task_id(), status.get().task_id());
EXPECT_EQ(TASK_RUNNING, status.get().state());
// We restart the slave to let it recover.
Stop(slave.get());
delete containerizer1.get();
// Set up so we can wait until the new slave updates the container's
// resources (this occurs after the executor has re-registered).
Future<Nothing> update =
FUTURE_DISPATCH(_, &MesosContainerizerProcess::update);
// Use the same flags.
Try<MesosContainerizer*> containerizer2 =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(containerizer2);
slave = StartSlave(containerizer2.get(), flags);
ASSERT_SOME(slave);
// Wait until the containerizer is updated.
AWAIT_READY(update);
Future<hashset<ContainerID>> containers = containerizer2.get()->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers.get().size());
ContainerID containerId = *(containers.get().begin());
// Wait a while for some memory pressure events to occur.
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = containerizer2.get()->usage(containerId);
AWAIT_READY(usage);
if (usage.get().mem_low_pressure_counter() > 0) {
// We will check the correctness of the memory pressure counters
// later, because the memory-hammering task is still active
// and potentially incrementing these counters.
break;
//.........remainder of this example omitted.........
Example 8: fetcher
// Test that memory pressure listening is restarted after recovery.
TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
// We only care about memory cgroup for this test.
flags.isolation = "cgroups/mem";
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
// Enable checkpointing for the framework.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Run a task that triggers a memory pressure event. We request 1GB of
// disk because we are going to write a 512 MB file repeatedly.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:256;disk:1024").get(),
"while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
Future<TaskStatus> starting;
Future<TaskStatus> running;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&starting))
.WillOnce(FutureArg<1>(&running))
.WillRepeatedly(Return()); // Ignore subsequent updates.
Future<Nothing> runningAck =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
Future<Nothing> startingAck =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
driver.launchTasks(offers.get()[0].id(), {task});
AWAIT_READY(starting);
EXPECT_EQ(task.task_id(), starting->task_id());
EXPECT_EQ(TASK_STARTING, starting->state());
AWAIT_READY(startingAck);
AWAIT_READY(running);
EXPECT_EQ(task.task_id(), running->task_id());
EXPECT_EQ(TASK_RUNNING, running->state());
// Wait for the ACK to be checkpointed.
AWAIT_READY_FOR(runningAck, Seconds(120));
// We restart the slave to let it recover.
slave.get()->terminate();
// Set up so we can wait until the new slave updates the container's
// resources (this occurs after the executor has re-registered).
Future<Nothing> update =
FUTURE_DISPATCH(_, &MesosContainerizerProcess::update);
// Use the same flags.
_containerizer = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
containerizer.reset(_containerizer.get());
Future<SlaveReregisteredMessage> reregistered =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), master.get()->pid, _);
slave = StartSlave(detector.get(), containerizer.get(), flags);
//.........remainder of this example omitted.........
Example 9: driver
// This test verifies that a task is launched on the agent if the task
// user is authorized based on `run_tasks` ACL configured on the agent
// to only allow whitelisted users to run tasks on the agent.
TYPED_TEST(SlaveAuthorizerTest, AuthorizeRunTaskOnAgent)
{
// Get the current user.
Result<string> user = os::user();
ASSERT_SOME(user) << "Failed to get the current user name"
<< (user.isError() ? ": " + user.error() : "");
Try<Owned<cluster::Master>> master = this->StartMaster();
ASSERT_SOME(master);
// Start a slave where `bar` and the current user are the only users
// authorized to launch tasks on the agent.
ACLs acls;
acls.set_permissive(false); // Restrictive.
mesos::ACL::RunTask* acl = acls.add_run_tasks();
acl->mutable_principals()->set_type(ACL::Entity::ANY);
acl->mutable_users()->add_values("bar");
acl->mutable_users()->add_values(user.get());
slave::Flags slaveFlags = this->CreateSlaveFlags();
slaveFlags.acls = acls;
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = this->StartSlave(
detector.get(), slaveFlags);
ASSERT_SOME(slave);
// Create a framework with user `foo`.
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_user("foo");
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
// Framework is registered since the master admits frameworks of any user.
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
// Launch the first task with no user, so it defaults to the
// framework user `foo`.
TaskInfo task1 = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:32").get(),
"sleep 1000");
// Launch the second task as the current user.
TaskInfo task2 = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:32").get(),
"sleep 1000");
task2.mutable_command()->set_user(user.get());
// The first task should fail since the task user `foo` is not an
// authorized user that can launch a task. However, the second task
// should succeed.
Future<TaskStatus> status0;
Future<TaskStatus> status1;
Future<TaskStatus> status2;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status0))
.WillOnce(FutureArg<1>(&status1))
.WillOnce(FutureArg<1>(&status2));
driver.acceptOffers(
{offer.id()},
{LAUNCH({task1, task2})});
// Wait for TASK_ERROR for 1st task, and TASK_STARTING followed by
// TASK_RUNNING for 2nd task.
AWAIT_READY(status0);
AWAIT_READY(status1);
AWAIT_READY(status2);
// Validate both the statuses. Note that the order of receiving the
// status updates for the 2 tasks is not deterministic, but we know
// that task2's TASK_RUNNING arrives after TASK_STARTING.
hashmap<TaskID, TaskStatus> statuses;
statuses[status0->task_id()] = status0.get();
statuses[status1->task_id()] = status1.get();
statuses[status2->task_id()] = status2.get();
//.........remainder of this example omitted.........
Example 10: fetcher
// In this test, the framework is not checkpointed. This ensures that when we
// stop the slave, the executor is killed and we will need to recover the
// working directories without getting any checkpointed recovery state.
TEST_F(ROOT_XFS_QuotaTest, NoCheckpointRecovery)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
slave::Flags flags = CreateSlaveFlags();
Fetcher fetcher(flags);
Try<MesosContainerizer*> _containerizer =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
Owned<MesosContainerizer> containerizer(_containerizer.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(
detector.get(),
containerizer.get(),
flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:1").get(),
"dd if=/dev/zero of=file bs=1048576 count=1; sleep 1000");
Future<TaskStatus> runningStatus;
Future<TaskStatus> startingStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus))
.WillOnce(Return());
driver.launchTasks(offer.id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
Future<ResourceUsage> usage1 =
process::dispatch(slave.get()->pid, &Slave::usage);
AWAIT_READY(usage1);
// We should have 1 executor using resources.
ASSERT_EQ(1, usage1->executors().size());
Future<hashset<ContainerID>> containers = containerizer->containers();
AWAIT_READY(containers);
ASSERT_EQ(1u, containers->size());
ContainerID containerId = *containers->begin();
// Restart the slave.
slave.get()->terminate();
Future<SlaveReregisteredMessage> slaveReregisteredMessage =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
_containerizer = MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(_containerizer);
containerizer.reset(_containerizer.get());
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
// Wait until slave recovery is complete.
Future<Nothing> _recover = FUTURE_DISPATCH(_, &Slave::_recover);
AWAIT_READY_FOR(_recover, Seconds(60));
//.........remainder of this example omitted.........
Example 11: fetcher
// This test verifies that persistent volumes are unmounted properly
// after a checkpointed framework disappears and the slave restarts.
//
// TODO(jieyu): Even though the command task specifies a new
// filesystem root, the executor (command executor) itself does not
// change its filesystem root (it uses the host filesystem). We need to
// add a test covering the scenario where the executor itself changes rootfs.
TEST_F(LinuxFilesystemIsolatorMesosTest,
ROOT_RecoverOrphanedPersistentVolume)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.resources = "cpus:2;mem:1024;disk(role1):1024";
flags.isolation = "filesystem/linux,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Fetcher fetcher(flags);
Try<MesosContainerizer*> create =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(create);
Owned<Containerizer> containerizer(create.get());
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(
detector.get(),
containerizer.get(),
flags);
ASSERT_SOME(slave);
MockScheduler sched;
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_roles(0, "role1");
frameworkInfo.set_checkpoint(true);
MesosSchedulerDriver driver(
&sched,
frameworkInfo,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
string dir1 = path::join(sandbox.get(), "dir1");
ASSERT_SOME(os::mkdir(dir1));
Resource persistentVolume = createPersistentVolume(
Megabytes(64),
"role1",
"id1",
"path1",
None(),
None(),
frameworkInfo.principal());
// Create a task that does nothing for a long time.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:512").get() + persistentVolume,
"sleep 1000");
task.mutable_container()->CopyFrom(createContainerInfo(
"test_image",
{createVolumeHostPath("/tmp", dir1, Volume::RW)}));
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillRepeatedly(DoDefault());
Future<Nothing> ack =
FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
// Create the persistent volumes and launch task via `acceptOffers`.
driver.acceptOffers(
//.........remainder of this example omitted.........
Example 12: driver
// This test verifies that the framework can launch a command task
// that specifies both container image and persistent volumes.
TEST_F(LinuxFilesystemIsolatorMesosTest,
ROOT_ChangeRootFilesystemCommandExecutorPersistentVolume)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.resources = "cpus:2;mem:1024;disk(role1):1024";
flags.isolation = "filesystem/linux,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_roles(0, "role1");
MesosSchedulerDriver driver(
&sched,
frameworkInfo,
master.get()->pid,
DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
string dir1 = path::join(sandbox.get(), "dir1");
ASSERT_SOME(os::mkdir(dir1));
Resource persistentVolume = createPersistentVolume(
Megabytes(64),
"role1",
"id1",
"path1",
None(),
None(),
frameworkInfo.principal());
// We use the filter explicitly here so that the resources will not
// be filtered for 5 seconds (the default).
Filters filters;
filters.set_refuse_seconds(0);
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:512").get() + persistentVolume,
"echo abc > path1/file");
task.mutable_container()->CopyFrom(createContainerInfo(
"test_image",
{createVolumeHostPath("/tmp", dir1, Volume::RW)}));
// Create the persistent volumes and launch task via `acceptOffers`.
driver.acceptOffers(
{offer.id()},
{CREATE(persistentVolume), LAUNCH({task})},
filters);
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
AWAIT_READY(statusStarting);
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
//.........remainder of this example omitted.........
Example 13: driver
// In this test, the framework is checkpointed so we expect the executor to
// persist across the slave restart and to have the same resource usage before
// and after.
TEST_F(ROOT_XFS_QuotaTest, CheckpointRecovery)
{
slave::Flags flags = CreateSlaveFlags();
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave =
  StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
frameworkInfo.set_checkpoint(true);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
Offer offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:128;disk:1").get(),
"dd if=/dev/zero of=file bs=1048576 count=1; sleep 1000");
Future<TaskStatus> startingStatus;
Future<TaskStatus> runningStatus;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&startingStatus))
.WillOnce(FutureArg<1>(&runningStatus));
driver.launchTasks(offer.id(), {task});
AWAIT_READY(startingStatus);
EXPECT_EQ(task.task_id(), startingStatus->task_id());
EXPECT_EQ(TASK_STARTING, startingStatus->state());
AWAIT_READY(runningStatus);
EXPECT_EQ(task.task_id(), runningStatus->task_id());
EXPECT_EQ(TASK_RUNNING, runningStatus->state());
Future<ResourceUsage> usage1 =
process::dispatch(slave.get()->pid, &Slave::usage);
AWAIT_READY(usage1);
// We should have 1 executor using resources.
ASSERT_EQ(1, usage1->executors().size());
// Restart the slave.
slave.get()->terminate();
Future<SlaveReregisteredMessage> slaveReregisteredMessage =
FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
// Wait for the slave to re-register.
AWAIT_READY(slaveReregisteredMessage);
Future<ResourceUsage> usage2 =
process::dispatch(slave.get()->pid, &Slave::usage);
AWAIT_READY(usage2);
// We should still have 1 executor using resources.
ASSERT_EQ(1, usage2->executors().size());
Try<std::list<string>> sandboxes = os::glob(path::join(
slave::paths::getSandboxRootDir(mountPoint.get()),
"*",
"frameworks",
"*",
"executors",
"*",
"runs",
"*"));
ASSERT_SOME(sandboxes);
// One sandbox and one symlink.
ASSERT_EQ(2u, sandboxes->size());
// Scan the remaining sandboxes. We ought to still have project IDs
// assigned to them all.
foreach (const string& sandbox, sandboxes.get()) {
//.........remainder of this example omitted.........
Example 14: exec
// This test verifies the slave will destroy a container if, when
// receiving a terminal status task update, updating the container's
// resources fails.
TEST_F(SlaveTest, TerminalTaskContainerizerUpdateFails)
{
// Start a master.
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
EXPECT_CALL(exec, registered(_, _, _, _));
TestContainerizer containerizer(&exec);
// Start a slave.
Try<PID<Slave> > slave = StartSlave(&containerizer);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(_, _, _));
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
// Start two tasks.
vector<TaskInfo> tasks;
tasks.push_back(createTask(
offer.slave_id(),
Resources::parse("cpus:0.1;mem:32").get(),
"sleep 1000",
exec.id));
tasks.push_back(createTask(
offer.slave_id(),
Resources::parse("cpus:0.1;mem:32").get(),
"sleep 1000",
exec.id));
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status1, status2, status3, status4;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status1))
.WillOnce(FutureArg<1>(&status2))
.WillOnce(FutureArg<1>(&status3))
.WillOnce(FutureArg<1>(&status4));
driver.launchTasks(offer.id(), tasks);
AWAIT_READY(status1);
EXPECT_EQ(TASK_RUNNING, status1.get().state());
AWAIT_READY(status2);
EXPECT_EQ(TASK_RUNNING, status2.get().state());
// Set up the containerizer so the next update() will fail.
EXPECT_CALL(containerizer, update(_, _))
.WillOnce(Return(process::Failure("update() failed")))
.WillRepeatedly(Return(Nothing()));
EXPECT_CALL(exec, killTask(_, _))
.WillOnce(SendStatusUpdateFromTaskID(TASK_KILLED));
// Kill one of the tasks. The failed update should result in the
// second task going lost when the container is destroyed.
driver.killTask(tasks[0].task_id());
AWAIT_READY(status3);
EXPECT_EQ(TASK_KILLED, status3.get().state());
AWAIT_READY(status4);
EXPECT_EQ(TASK_LOST, status4.get().state());
driver.stop();
driver.join();
Shutdown();
}
Example 15: exec
TEST_F(FaultToleranceTest, ForwardStatusUpdateUnknownExecutor)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
Try<PID<Slave> > slave = StartSlave(&exec);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
FrameworkID frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(SaveArg<1>(&frameworkId));
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers));
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
Offer offer = offers.get()[0];
TaskInfo task;
task.set_name("");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offer.slave_id());
task.mutable_resources()->MergeFrom(offer.resources());
task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
vector<TaskInfo> tasks;
tasks.push_back(task);
Future<Nothing> statusUpdate;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureSatisfy(&statusUpdate)); // TASK_RUNNING of task1.
EXPECT_CALL(exec, registered(_, _, _, _));
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
driver.launchTasks(offer.id(), tasks);
// Wait until TASK_RUNNING of task1 is received.
AWAIT_READY(statusUpdate);
// Simulate the slave receiving a status update from an unknown
// (e.g., exited) executor of the given framework.
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status)); // TASK_RUNNING of task2.
TaskID taskId;
taskId.set_value("task2");
StatusUpdate statusUpdate2 = createStatusUpdate(
frameworkId, offer.slave_id(), taskId, TASK_RUNNING, "Dummy update");
process::dispatch(slave.get(), &Slave::statusUpdate, statusUpdate2);
// Ensure that the scheduler receives task2's update.
AWAIT_READY(status);
EXPECT_EQ(taskId, status.get().task_id());
EXPECT_EQ(TASK_RUNNING, status.get().state());
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
Shutdown();
}