This page collects typical usage examples of the C++ method TaskInfo::mutable_container, gathered from open source projects. If you are unsure what TaskInfo::mutable_container does or how to use it, the curated examples below should help; you can also look into the containing class, TaskInfo, for further context.
The following 11 code examples of TaskInfo::mutable_container are shown, ordered by popularity.
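Before diving into the full examples, here is a minimal sketch of the pattern they all share: build a ContainerInfo message and attach it to the task through mutable_container(). This is an illustrative distillation rather than code from any of the projects below; the include path and the "alpine" image are assumptions.

#include <mesos/mesos.pb.h>  // assumed include path for the Mesos protobufs

// Minimal sketch: attach a Docker ContainerInfo to a TaskInfo.
mesos::TaskInfo makeDockerTask()
{
  mesos::TaskInfo task;
  task.set_name("example");
  task.mutable_task_id()->set_value("example-1");

  mesos::ContainerInfo container;
  container.set_type(mesos::ContainerInfo::DOCKER);
  container.mutable_docker()->set_image("alpine");

  // mutable_container() lazily creates the embedded message;
  // CopyFrom then overwrites it with the fully-built ContainerInfo.
  task.mutable_container()->CopyFrom(container);
  return task;
}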
Example 1: buildTask
TaskInfo buildTask(string hostname, string id, const SlaveID& slave) {
  hostProfile profile = hostList[hostname];

  // Define the Docker container. Since there is no "executor" to manage
  // the tasks, the container is built and attached directly to the task
  // below.
  ContainerInfo container;
  container.set_type(ContainerInfo::DOCKER);

  ContainerInfo::DockerInfo docker;
  docker.set_image(DOCKER_IMAGE);
  container.mutable_docker()->MergeFrom(docker);

  // Mount a local volume inside the container.
  Volume* volume = container.add_volumes();
  volume->set_container_path("/mnt");
  volume->set_host_path("/local/mesos");
  volume->set_mode(Volume_Mode_RW);

  // Define the task.
  TaskInfo task;
  task.set_name("K3-" + k3binary);
  task.mutable_task_id()->set_value(id);
  task.mutable_slave_id()->MergeFrom(slave);
  task.mutable_container()->MergeFrom(container);
  // task.set_data(stringify(localTasks));

  // Define include files for the command.
  CommandInfo command;
  CommandInfo_URI* k3_bin = command.add_uris();
  k3_bin->set_value(fileServer + "/" + k3binary);
  k3_bin->set_executable(true);
  k3_bin->set_extract(false);
  // CommandInfo_URI* k3_args = command.add_uris();
  // k3_args->set_value(runpath + "/k3input.yaml");
  // command.set_value("$MESOS_SANDBOX/" + k3binary + " -l INFO -p " +
  //                   "$MESOS_SANDBOX/k3input.yaml");
  task.mutable_command()->MergeFrom(command);

  // Option A for resource management (see the scheduler for option B).
  Resource* resource;

  resource = task.add_resources();
  resource->set_name("cpus");
  resource->set_type(Value::SCALAR);
  resource->mutable_scalar()->set_value(profile.cpu);

  resource = task.add_resources();
  resource->set_name("mem");
  resource->set_type(Value::SCALAR);
  resource->mutable_scalar()->set_value(profile.mem);

  return task;
}
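A note on the protobuf calls used here and in later examples: MergeFrom overlays the fields that are set on its argument onto whatever the target already contains, while CopyFrom clears the target first. On a freshly constructed message the two are equivalent, which is why the examples on this page mix them freely. A small illustration with hypothetical values:

ContainerInfo a;
a.set_type(ContainerInfo::DOCKER);

ContainerInfo b;
b.mutable_docker()->set_image("alpine");

a.MergeFrom(b);  // 'a' keeps type == DOCKER and gains the docker image.
a.CopyFrom(b);   // 'a' is cleared first; only the docker image remains set.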
Example 2: resourceOffers
virtual void resourceOffers(
    SchedulerDriver* driver,
    const vector<Offer>& offers)
{
  static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

  if (TASK_RESOURCES.isError()) {
    cerr << "Failed to parse resources '" << resources
         << "': " << TASK_RESOURCES.error() << endl;
    driver->abort();
    return;
  }

  foreach (const Offer& offer, offers) {
    if (!launched &&
        Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
      TaskInfo task;
      task.set_name(name);
      task.mutable_task_id()->set_value(name);
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());
      task.mutable_command()->set_value(command);

      if (uri.isSome()) {
        task.mutable_command()->add_uris()->set_value(uri.get());
      }

      if (dockerImage.isSome()) {
        ContainerInfo containerInfo;
        containerInfo.set_type(ContainerInfo::DOCKER);

        ContainerInfo::DockerInfo dockerInfo;
        dockerInfo.set_image(dockerImage.get());

        containerInfo.mutable_docker()->CopyFrom(dockerInfo);
        task.mutable_container()->CopyFrom(containerInfo);
      }

      vector<TaskInfo> tasks;
      tasks.push_back(task);

      driver->launchTasks(offer.id(), tasks);
      cout << "task " << name << " submitted to slave "
           << offer.slave_id() << endl;

      launched = true;
    } else {
      driver->declineOffer(offer.id());
    }
  }
}
Example 3: driver
// This test confirms that if a task exceeds its configured resource
// limits, it is forcibly terminated.
TEST_F(PosixRLimitsIsolatorTest, TaskExceedingLimit)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  // The task attempts to use an infinite amount of CPU time.
  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "while true; do true; done");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Limit the process to at most 1 second of CPU time.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* cpuLimit = rlimitInfo.add_rlimits();
  cpuLimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
  cpuLimit->set_soft(1);
  cpuLimit->set_hard(1);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFailed;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFailed));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(statusRunning);
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFailed);
  EXPECT_EQ(task.task_id(), statusFailed->task_id());
  EXPECT_EQ(TASK_FAILED, statusFailed->state());

  driver.stop();
  driver.join();
}
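For context on why the task above ends in TASK_FAILED: the posix/rlimits isolator ultimately applies the limit with setrlimit(2), and the kernel delivers SIGXCPU once the soft RLIMIT_CPU is exceeded, killing the busy-looping task. A standalone sketch of the same 1-second cap (plain POSIX, not Mesos code):

#include <sys/resource.h>

// Cap the calling process at 1 second of CPU time.
int capCpuSeconds()
{
  struct rlimit limit;
  limit.rlim_cur = 1;  // soft limit: SIGXCPU is delivered when exceeded
  limit.rlim_max = 1;  // hard limit: ceiling the soft limit may be raised to
  return setrlimit(RLIMIT_CPU, &limit);
}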
Example 4: MockDocker
// Test that the pre-launch docker hook executes before a docker
// container is launched. The test hook creates a file "foo" in the
// sandbox directory. When the docker container is launched, the sandbox
// directory is mounted into it, so we validate the hook by verifying
// that the "foo" file exists inside the docker container.
TEST_F(HookTest, ROOT_DOCKER_VerifySlavePreLaunchDockerHook)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  MockDocker* mockDocker =
    new MockDocker(tests::flags.docker, tests::flags.docker_socket);

  Shared<Docker> docker(mockDocker);

  slave::Flags flags = CreateSlaveFlags();

  Fetcher fetcher;

  Try<ContainerLogger*> logger =
    ContainerLogger::create(flags.container_logger);
  ASSERT_SOME(logger);

  MockDockerContainerizer containerizer(
      flags,
      &fetcher,
      Owned<ContainerLogger>(logger.get()),
      docker);

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave =
    StartSlave(detector.get(), &containerizer, flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);
  AWAIT_READY(offers);
  ASSERT_NE(0u, offers.get().size());

  const Offer& offer = offers.get()[0];

  SlaveID slaveId = offer.slave_id();

  TaskInfo task;
  task.set_name("");
  task.mutable_task_id()->set_value("1");
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(offer.resources());

  CommandInfo command;
  command.set_value("test -f " + path::join(flags.sandbox_directory, "foo"));

  ContainerInfo containerInfo;
  containerInfo.set_type(ContainerInfo::DOCKER);

  // TODO(tnachen): Use local image to test if possible.
  ContainerInfo::DockerInfo dockerInfo;
  dockerInfo.set_image("alpine");
  containerInfo.mutable_docker()->CopyFrom(dockerInfo);

  task.mutable_command()->CopyFrom(command);
  task.mutable_container()->CopyFrom(containerInfo);

  vector<TaskInfo> tasks;
  tasks.push_back(task);

  Future<ContainerID> containerId;
  EXPECT_CALL(containerizer, launch(_, _, _, _, _, _, _, _))
    .WillOnce(DoAll(FutureArg<0>(&containerId),
                    Invoke(&containerizer,
                           &MockDockerContainerizer::_launch)));

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished))
    .WillRepeatedly(DoDefault());

  driver.launchTasks(offers.get()[0].id(), tasks);

  AWAIT_READY_FOR(containerId, Seconds(60));
  AWAIT_READY_FOR(statusRunning, Seconds(60));
  // ... the rest of this example is omitted ...
Example 5: resourceOffers
virtual void resourceOffers(
    SchedulerDriver* driver,
    const vector<Offer>& offers)
{
  static const Try<Resources> TASK_RESOURCES = Resources::parse(resources);

  if (TASK_RESOURCES.isError()) {
    cerr << "Failed to parse resources '" << resources
         << "': " << TASK_RESOURCES.error() << endl;
    driver->abort();
    return;
  }

  for (const Offer& offer : offers) {
    if (!launched &&
        Resources(offer.resources()).contains(TASK_RESOURCES.get())) {
      TaskInfo task;
      task.set_name(name);
      task.mutable_task_id()->set_value(name);
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_resources()->CopyFrom(TASK_RESOURCES.get());

      CommandInfo* commandInfo = task.mutable_command();
      commandInfo->set_value(command);

      // if (environment.isSome()) {
      //   Environment* environment_ = commandInfo->mutable_environment();
      //   foreachpair (const std::string& name,
      //                const std::string& value,
      //                environment.get()) {
      //     Environment_Variable* environmentVariable =
      //       environment_->add_variables();
      //     environmentVariable->set_name(name);
      //     environmentVariable->set_value(value);
      //   }
      // }

      if (dockerImage.isSome()) {
        ContainerInfo containerInfo;

        if (containerizer == "mesos") {
          containerInfo.set_type(ContainerInfo::MESOS);

          ContainerInfo::MesosInfo mesosInfo;

          Image mesosImage;
          mesosImage.set_type(Image::DOCKER);
          mesosImage.mutable_docker()->set_name(dockerImage.get());
          mesosInfo.mutable_image()->CopyFrom(mesosImage);

          containerInfo.mutable_mesos()->CopyFrom(mesosInfo);
        } else if (containerizer == "docker") {
          containerInfo.set_type(ContainerInfo::DOCKER);

          ContainerInfo::DockerInfo dockerInfo;
          dockerInfo.set_image(dockerImage.get());

          containerInfo.mutable_docker()->CopyFrom(dockerInfo);
        } else {
          cerr << "Unsupported containerizer: " << containerizer << endl;
          driver->abort();
          return;
        }

        task.mutable_container()->CopyFrom(containerInfo);
      }

      vector<TaskInfo> tasks;
      tasks.push_back(task);

      driver->launchTasks(offer.id(), tasks);
      cout << "task " << name << " submitted to slave "
           << offer.slave_id() << endl;

      launched = true;
    } else {
      driver->declineOffer(offer.id());
    }
  }
}
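The commented-out block in this example hints at how environment variables would be attached to the command. A minimal sketch of that pattern, with a hypothetical variable name and value (the Environment message does live on CommandInfo; Environment::Variable is the nested-class spelling of the Environment_Variable type used in the comment):

// Assumes 'commandInfo' is the CommandInfo* from the example above.
Environment* env = commandInfo->mutable_environment();

Environment::Variable* variable = env->add_variables();
variable->set_name("MY_VAR");     // hypothetical name
variable->set_value("my-value");  // hypothetical value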
Example 6: driver
// This test launches a container that has an image and joins the host
// network, and then verifies that the container can access the Internet.
TEST_F(CniIsolatorTest, ROOT_INTERNET_CURL_LaunchContainerInHostNetwork)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_store_dir = path::join(sandbox.get(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  // NOTE: We use a non-shell command here because 'sh' might not be
  // in the PATH. 'alpine' does not specify env PATH in the image.
  CommandInfo command;
  command.set_shell(false);
  command.set_value("/bin/ping");
  command.add_arguments("/bin/ping");
  command.add_arguments("-c1");
  command.add_arguments("google.com");

  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:128").get(),
      command);

  Image image;
  image.set_type(Image::DOCKER);
  image.mutable_docker()->set_name("alpine");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
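The non-shell command used above is worth a closer look, since it differs from the plain set_value calls in earlier examples. With shell=true (the default), value is handed to the shell as 'sh -c value'; with shell=false, value is the path of the executable and the arguments list becomes its argv, including argv[0]. A side-by-side sketch:

CommandInfo shellCmd;
shellCmd.set_value("ping -c1 google.com");  // runs via 'sh -c ...'

CommandInfo execCmd;
execCmd.set_shell(false);
execCmd.set_value("/bin/ping");      // path of the executable to run
execCmd.add_arguments("/bin/ping");  // argv[0]
execCmd.add_arguments("-c1");
execCmd.add_arguments("google.com");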
Example 7: driver
// This test verifies that the docker image's default cmd is executed
// correctly. This corresponds to the case in the runtime isolator logic
// table: sh=0, value=0, argv=1, entrypoint=0, cmd=1.
TEST_F(DockerRuntimeIsolatorTest, ROOT_DockerDefaultCmdLocalPuller)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  const string directory = path::join(os::getcwd(), "archives");

  Future<Nothing> testImage =
    DockerArchive::create(directory, "alpine", "null", "[\"sh\"]");
  AWAIT_READY(testImage);

  ASSERT_TRUE(os::exists(path::join(directory, "alpine.tar")));

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "docker/runtime,filesystem/linux";
  flags.image_providers = "docker";
  flags.docker_registry = directory;

  // Use a temporary directory as the docker store directory: the manifest
  // of the test image can change, so an image cached by a previous test
  // must never be reused.
  flags.docker_store_dir = path::join(os::getcwd(), "store");

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_EQ(1u, offers->size());

  const Offer& offer = offers.get()[0];

  TaskInfo task;
  task.set_name("test-task");
  task.mutable_task_id()->set_value(UUID::random().toString());
  task.mutable_slave_id()->CopyFrom(offer.slave_id());
  task.mutable_resources()->CopyFrom(Resources::parse("cpus:1;mem:128").get());
  task.mutable_command()->set_shell(false);
  task.mutable_command()->add_arguments("-c");
  task.mutable_command()->add_arguments("echo 'hello world'");

  Image image;
  image.set_type(Image::DOCKER);
  image.mutable_docker()->set_name("alpine");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);
  container->mutable_mesos()->mutable_image()->CopyFrom(image);

  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  driver.launchTasks(offer.id(), {task});

  AWAIT_READY_FOR(statusRunning, Seconds(60));
  EXPECT_EQ(task.task_id(), statusRunning->task_id());
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(task.task_id(), statusFinished->task_id());
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());

  driver.stop();
  driver.join();
}
Example 8: resourceOffers
virtual void resourceOffers(SchedulerDriver* driver,
                            const vector<Offer>& offers)
{
  cout << "." << flush;
  for (size_t i = 0; i < offers.size(); i++) {
    const Offer& offer = offers[i];

    // Look up the resources we care about.
    // TODO(benh): It would be nice to ultimately have some helper
    // functions for looking up resources.
    double cpus = 0;
    double mem = 0;

    for (int j = 0; j < offer.resources_size(); j++) {
      const Resource& resource = offer.resources(j);
      if (resource.name() == "cpus" &&
          resource.type() == Value::SCALAR) {
        cpus = resource.scalar().value();
      } else if (resource.name() == "mem" &&
                 resource.type() == Value::SCALAR) {
        mem = resource.scalar().value();
      }
    }

    // Launch tasks.
    vector<TaskInfo> tasks;
    while (tasksLaunched < totalTasks &&
           cpus >= CPUS_PER_TASK &&
           mem >= MEM_PER_TASK) {
      int taskId = tasksLaunched++;

      cout << "Starting task " << taskId << " on "
           << offer.hostname() << endl;

      TaskInfo task;
      task.set_name("Task " + lexical_cast<string>(taskId));
      task.mutable_task_id()->set_value(lexical_cast<string>(taskId));
      task.mutable_slave_id()->MergeFrom(offer.slave_id());
      task.mutable_command()->set_value("echo hello");

      // Use Docker to run the task.
      ContainerInfo containerInfo;
      containerInfo.set_type(ContainerInfo::DOCKER);

      ContainerInfo::DockerInfo dockerInfo;
      dockerInfo.set_image("busybox");
      containerInfo.mutable_docker()->CopyFrom(dockerInfo);

      task.mutable_container()->CopyFrom(containerInfo);

      Resource* resource;

      resource = task.add_resources();
      resource->set_name("cpus");
      resource->set_type(Value::SCALAR);
      resource->mutable_scalar()->set_value(CPUS_PER_TASK);

      resource = task.add_resources();
      resource->set_name("mem");
      resource->set_type(Value::SCALAR);
      resource->mutable_scalar()->set_value(MEM_PER_TASK);

      tasks.push_back(task);

      cpus -= CPUS_PER_TASK;
      mem -= MEM_PER_TASK;
    }

    driver->launchTasks(offer.id(), tasks);
  }
}
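The TODO(benh) comment above asks for helper functions in place of the manual scan over offer.resources(). The Resources wrapper class provides accessors along those lines; a hedged sketch, assuming the cpus()/mem() helpers (which return Option values) are available in your Mesos version:

// Possible replacement for the manual resource scan above.
Resources offered = offer.resources();

double cpus = offered.cpus().getOrElse(0.0);   // Option<double>
Bytes mem = offered.mem().getOrElse(Bytes(0)); // Option<Bytes>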
Example 9: fetcher
// This test verifies that persistent volumes are unmounted properly
// after a checkpointed framework disappears and the slave restarts.
//
// TODO(jieyu): Even though the command task specifies a new
// filesystem root, the executor (command executor) itself does not
// change its filesystem root (it uses the host filesystem). We need to
// add a test for the scenario where the executor itself changes rootfs.
TEST_F(LinuxFilesystemIsolatorMesosTest,
       ROOT_RecoverOrphanedPersistentVolume)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));

  slave::Flags flags = CreateSlaveFlags();
  flags.resources = "cpus:2;mem:1024;disk(role1):1024";
  flags.isolation = "filesystem/linux,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";

  Fetcher fetcher(flags);

  Try<MesosContainerizer*> create =
    MesosContainerizer::create(flags, true, &fetcher);
  ASSERT_SOME(create);

  Owned<Containerizer> containerizer(create.get());

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(
      detector.get(),
      containerizer.get(),
      flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_roles(0, "role1");
  frameworkInfo.set_checkpoint(true);

  MesosSchedulerDriver driver(
      &sched,
      frameworkInfo,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  Offer offer = offers.get()[0];

  string dir1 = path::join(sandbox.get(), "dir1");
  ASSERT_SOME(os::mkdir(dir1));

  Resource persistentVolume = createPersistentVolume(
      Megabytes(64),
      "role1",
      "id1",
      "path1",
      None(),
      None(),
      frameworkInfo.principal());

  // Create a task that does nothing for a long time.
  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:512").get() + persistentVolume,
      "sleep 1000");

  task.mutable_container()->CopyFrom(createContainerInfo(
      "test_image",
      {createVolumeHostPath("/tmp", dir1, Volume::RW)}));

  Future<TaskStatus> statusStarting;
  Future<TaskStatus> statusRunning;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusStarting))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillRepeatedly(DoDefault());

  Future<Nothing> ack =
    FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);

  // Create the persistent volumes and launch the task via `acceptOffers`.
  driver.acceptOffers(
  // ... the rest of this example is omitted ...
Example 10: driver
// This test verifies that the framework can launch a command task
// that specifies both a container image and persistent volumes.
TEST_F(LinuxFilesystemIsolatorMesosTest,
       ROOT_ChangeRootFilesystemCommandExecutorPersistentVolume)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  string registry = path::join(sandbox.get(), "registry");
  AWAIT_READY(DockerArchive::create(registry, "test_image"));

  slave::Flags flags = CreateSlaveFlags();
  flags.resources = "cpus:2;mem:1024;disk(role1):1024";
  flags.isolation = "filesystem/linux,docker/runtime";
  flags.docker_registry = registry;
  flags.docker_store_dir = path::join(sandbox.get(), "store");
  flags.image_providers = "docker";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;

  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
  frameworkInfo.set_roles(0, "role1");

  MesosSchedulerDriver driver(
      &sched,
      frameworkInfo,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  Future<FrameworkID> frameworkId;
  EXPECT_CALL(sched, registered(&driver, _, _))
    .WillOnce(FutureArg<1>(&frameworkId));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(frameworkId);

  AWAIT_READY(offers);
  ASSERT_FALSE(offers->empty());

  Offer offer = offers.get()[0];

  string dir1 = path::join(sandbox.get(), "dir1");
  ASSERT_SOME(os::mkdir(dir1));

  Resource persistentVolume = createPersistentVolume(
      Megabytes(64),
      "role1",
      "id1",
      "path1",
      None(),
      None(),
      frameworkInfo.principal());

  // We use the filter explicitly here so that the resources will not
  // be filtered for 5 seconds (the default).
  Filters filters;
  filters.set_refuse_seconds(0);

  TaskInfo task = createTask(
      offer.slave_id(),
      Resources::parse("cpus:1;mem:512").get() + persistentVolume,
      "echo abc > path1/file");

  task.mutable_container()->CopyFrom(createContainerInfo(
      "test_image",
      {createVolumeHostPath("/tmp", dir1, Volume::RW)}));

  // Create the persistent volumes and launch the task via `acceptOffers`.
  driver.acceptOffers(
      {offer.id()},
      {CREATE(persistentVolume), LAUNCH({task})},
      filters);

  Future<TaskStatus> statusStarting;
  Future<TaskStatus> statusRunning;
  Future<TaskStatus> statusFinished;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&statusStarting))
    .WillOnce(FutureArg<1>(&statusRunning))
    .WillOnce(FutureArg<1>(&statusFinished));

  AWAIT_READY(statusStarting);
  EXPECT_EQ(TASK_STARTING, statusStarting->state());

  AWAIT_READY(statusRunning);
  EXPECT_EQ(TASK_RUNNING, statusRunning->state());

  AWAIT_READY(statusFinished);
  EXPECT_EQ(TASK_FINISHED, statusFinished->state());
  // ... the rest of this example is omitted ...
Example 11: driver
// This test checks the behavior when invalid limits are passed.
TEST_F(PosixRLimitsIsolatorTest, InvalidLimits)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  slave::Flags flags = CreateSlaveFlags();
  flags.isolation = "posix/rlimits";

  Owned<MasterDetector> detector = master.get()->createDetector();

  Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
  ASSERT_SOME(slave);

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched,
      DEFAULT_FRAMEWORK_INFO,
      master.get()->pid,
      DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(_, _, _));

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(_, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // Ignore subsequent offers.

  driver.start();

  AWAIT_READY(offers);
  ASSERT_NE(0u, offers->size());

  TaskInfo task = createTask(
      offers.get()[0].slave_id(),
      offers.get()[0].resources(),
      "true");

  ContainerInfo* container = task.mutable_container();
  container->set_type(ContainerInfo::MESOS);

  // Set an impossible limit: soft > hard.
  RLimitInfo rlimitInfo;
  RLimitInfo::RLimit* rlimit = rlimitInfo.add_rlimits();
  rlimit->set_type(RLimitInfo::RLimit::RLMT_CPU);
  rlimit->set_soft(100);
  rlimit->set_hard(1);

  container->mutable_rlimit_info()->CopyFrom(rlimitInfo);

  Future<TaskStatus> taskStatus;
  EXPECT_CALL(sched, statusUpdate(&driver, _))
    .WillOnce(FutureArg<1>(&taskStatus));

  driver.launchTasks(offers.get()[0].id(), {task});

  AWAIT_READY(taskStatus);
  EXPECT_EQ(task.task_id(), taskStatus->task_id());
  EXPECT_EQ(TASK_FAILED, taskStatus->state());
  EXPECT_EQ(TaskStatus::REASON_EXECUTOR_TERMINATED, taskStatus->reason());

  driver.stop();
  driver.join();
}